the-stack_106_23917
# 2. For a list, implement swapping of the values of neighbouring elements,
# i.e. the elements with indices 0 and 1 swap values, then 2 and 3, and so on.
# If the number of elements is odd, the last element keeps its place.
# The list must be filled using the input() function.
# create an empty list
my_list = []
# read the number of list elements
n = int(input("Enter the number of list elements: "))
# validate the value
if n < 1:
    raise ValueError('Error! The allowed value is >= 1.')
# fill the list
for i in range(n):
    e = int(input(f'Enter integer element {i+1} of the list: '))
    my_list.append(e)
# print the list
print(f'Initial list: {my_list}')
# walk over the odd indices of the list
for i in range(1, len(my_list), 2):
    x, y = my_list[i-1], my_list[i]
    my_list[i-1], my_list[i] = y, x
# print the list
print(f'Final list: {my_list}')
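# Added note (illustrative sketch, not part of the original exercise): the same
# pairwise swap can be written without an explicit loop by using extended slice
# assignment; truncating the slices to an even number of elements keeps an odd
# trailing element in place.
#
#     pairs = 2 * (len(my_list) // 2)
#     my_list[:pairs:2], my_list[1:pairs:2] = my_list[1:pairs:2], my_list[:pairs:2]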
the-stack_106_23918
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Any
from copy import copy
import io
import os
import pkgutil
import collections
import numpy as np
import pymc3 as pm
import theano.tensor as tt
import theano
__all__ = [
'get_data',
'GeneratorAdapter',
'Minibatch',
'align_minibatches',
'Data',
]
def get_data(filename):
"""Returns a BytesIO object for a package data file.
Parameters
----------
filename: str
file to load
Returns
-------
BytesIO of the data
"""
data_pkg = 'pymc3.examples'
return io.BytesIO(pkgutil.get_data(data_pkg, os.path.join('data', filename)))
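# Added usage sketch (illustrative; assumes pandas is installed and that the named
# CSV ships with the pymc3 example data -- adjust the filename as needed):
#
#     import pandas as pd
#     df = pd.read_csv(get_data('radon.csv'))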
class GenTensorVariable(tt.TensorVariable):
def __init__(self, op, type, name=None):
super().__init__(type=type, name=name)
self.op = op
def set_gen(self, gen):
self.op.set_gen(gen)
def set_default(self, value):
self.op.set_default(value)
def clone(self):
cp = self.__class__(self.op, self.type, self.name)
cp.tag = copy(self.tag)
return cp
class GeneratorAdapter:
"""
    Helper class that infers the data type of a generator by looking at its
    first item, while preserving the order of the resulting generator
"""
def make_variable(self, gop, name=None):
var = GenTensorVariable(gop, self.tensortype, name)
var.tag.test_value = self.test_value
return var
def __init__(self, generator):
if not pm.vartypes.isgenerator(generator):
raise TypeError('Object should be generator like')
self.test_value = pm.smartfloatX(copy(next(generator)))
# make pickling potentially possible
self._yielded_test_value = False
self.gen = generator
self.tensortype = tt.TensorType(
self.test_value.dtype,
((False, ) * self.test_value.ndim))
# python3 generator
def __next__(self):
if not self._yielded_test_value:
self._yielded_test_value = True
return self.test_value
else:
return pm.smartfloatX(copy(next(self.gen)))
# python2 generator
next = __next__
def __iter__(self):
return self
def __eq__(self, other):
return id(self) == id(other)
def __hash__(self):
return hash(id(self))
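# Added usage sketch (illustrative, not part of the module): GeneratorAdapter peeks
# at the first yielded array to build a matching TensorType and still yields that
# first item when iterated, so no data is lost.
#
#     import numpy as np
#     gen = (np.random.rand(10) for _ in range(3))
#     adapter = GeneratorAdapter(gen)
#     first = next(adapter)   # the peeked test value
#     assert first.shape == (10,)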
class Minibatch(tt.TensorVariable):
"""Multidimensional minibatch that is pure TensorVariable
Parameters
----------
data: np.ndarray
initial data
batch_size: ``int`` or ``List[int|tuple(size, random_seed)]``
batch size for inference, random seed is needed
for child random generators
dtype: ``str``
cast data to specific type
broadcastable: tuple[bool]
change broadcastable pattern that defaults to ``(False, ) * ndim``
name: ``str``
name for tensor, defaults to "Minibatch"
random_seed: ``int``
random seed that is used by default
update_shared_f: ``callable``
        a callable that returns an :class:`ndarray`; its result will be carefully
        stored in the underlying shared variable,
        so you can use it to change the source of
        minibatches programmatically
in_memory_size: ``int`` or ``List[int|slice|Ellipsis]``
data size for storing in ``theano.shared``
Attributes
----------
shared: shared tensor
Used for storing data
minibatch: minibatch tensor
Used for training
Notes
-----
Below is a common use case of Minibatch with variational inference.
Importantly, we need to make PyMC3 "aware" that a minibatch is being used in inference.
Otherwise, we will get the wrong :math:`logp` for the model.
To do so, we need to pass the ``total_size`` parameter to the observed node, which correctly scales
the density of the model ``logp`` that is affected by Minibatch. See more in the examples below.
Examples
--------
Consider we have `data` as follows:
>>> data = np.random.rand(100, 100)
if we want a 1d slice of size 10 we do
>>> x = Minibatch(data, batch_size=10)
    Note that your data is cast to ``floatX`` if it is not of an integer type.
    But you can still pass the ``dtype`` kwarg to :class:`Minibatch`
    if you need more control.
If we want 10 sampled rows and columns
``[(size, seed), (size, seed)]`` we can use
>>> x = Minibatch(data, batch_size=[(10, 42), (10, 42)], dtype='int32')
>>> assert str(x.dtype) == 'int32'
Or, more simply, we can use the default random seed = 42
``[size, size]``
>>> x = Minibatch(data, batch_size=[10, 10])
In the above, `x` is a regular :class:`TensorVariable` that supports any math operations:
>>> assert x.eval().shape == (10, 10)
You can pass the Minibatch `x` to your desired model:
>>> with pm.Model() as model:
... mu = pm.Flat('mu')
... sd = pm.HalfNormal('sd')
... lik = pm.Normal('lik', mu, sd, observed=x, total_size=(100, 100))
Then you can perform regular Variational Inference out of the box
>>> with model:
... approx = pm.fit()
    Important note: :class:`Minibatch` has ``shared`` and ``minibatch`` attributes
    you can use later:
    >>> x.set_value(np.random.laplace(size=(100, 100)))
    and minibatches will then be drawn from the new storage;
    ``set_value`` directly affects ``x.shared``.
    A less convenient, but more explicit, way to achieve the same
    thing:
>>> x.shared.set_value(pm.floatX(np.random.laplace(size=(100, 100))))
    The programmatic way to change the storage is as follows
    (we import ``partial`` for brevity):
>>> from functools import partial
>>> datagen = partial(np.random.laplace, size=(100, 100))
>>> x = Minibatch(datagen(), batch_size=10, update_shared_f=datagen)
>>> x.update_shared()
To be more concrete about how we create a minibatch, here is a demo:
1. create a shared variable
>>> shared = theano.shared(data)
2. take a random slice of size 10:
>>> ridx = pm.tt_rng().uniform(size=(10,), low=0, high=data.shape[0]-1e-10).astype('int64')
    3. take the resulting slice:
>>> minibatch = shared[ridx]
That's done. Now you can use this minibatch somewhere else.
    You can see that the implementation does not require a fixed shape
    for the shared variable, but feel free to put a fixed shape on the
    shared variable if that is appropriate for your use case.
Suppose you need to make some replacements in the graph, e.g. change the minibatch to testdata
>>> node = x ** 2 # arbitrary expressions on minibatch `x`
>>> testdata = pm.floatX(np.random.laplace(size=(1000, 10)))
Then you should create a `dict` with replacements:
>>> replacements = {x: testdata}
>>> rnode = theano.clone(node, replacements)
>>> assert (testdata ** 2 == rnode.eval()).all()
    Replacing a minibatch with its shared variable works the same way; this is
    useful, e.g., for evaluating an expression on the full stored data rather
    than on a random slice. Note that ``theano.clone`` returns a new node instead
    of modifying the existing one. The minibatch slice is accessible through the
    `minibatch` attribute. For example
>>> replacements = {x.minibatch: x.shared}
>>> rnode = theano.clone(node, replacements)
    More complex slices need a bit more code, which may look less clear at first
>>> moredata = np.random.rand(10, 20, 30, 40, 50)
    The default ``total_size`` that can be passed to a ``PyMC3`` random node
    is then ``(10, 20, 30, 40, 50)``, but it can be written less verbosely in some cases
1. Advanced indexing, ``total_size = (10, Ellipsis, 50)``
>>> x = Minibatch(moredata, [2, Ellipsis, 10])
We take the slice only for the first and last dimension
>>> assert x.eval().shape == (2, 20, 30, 40, 10)
2. Skipping a particular dimension, ``total_size = (10, None, 30)``:
>>> x = Minibatch(moredata, [2, None, 20])
>>> assert x.eval().shape == (2, 20, 20, 40, 50)
3. Mixing both of these together, ``total_size = (10, None, 30, Ellipsis, 50)``:
>>> x = Minibatch(moredata, [2, None, 20, Ellipsis, 10])
>>> assert x.eval().shape == (2, 20, 20, 40, 10)
"""
RNG = collections.defaultdict(list) # type: Dict[str, List[Any]]
@theano.configparser.change_flags(compute_test_value='raise')
def __init__(self, data, batch_size=128, dtype=None, broadcastable=None, name='Minibatch',
random_seed=42, update_shared_f=None, in_memory_size=None):
if dtype is None:
data = pm.smartfloatX(np.asarray(data))
else:
data = np.asarray(data, dtype)
in_memory_slc = self.make_static_slices(in_memory_size)
self.shared = theano.shared(data[in_memory_slc])
self.update_shared_f = update_shared_f
self.random_slc = self.make_random_slices(self.shared.shape, batch_size, random_seed)
minibatch = self.shared[self.random_slc]
if broadcastable is None:
broadcastable = (False, ) * minibatch.ndim
minibatch = tt.patternbroadcast(minibatch, broadcastable)
self.minibatch = minibatch
super().__init__(self.minibatch.type, None, None, name=name)
theano.Apply(
theano.compile.view_op,
inputs=[self.minibatch], outputs=[self])
self.tag.test_value = copy(self.minibatch.tag.test_value)
def rslice(self, total, size, seed):
if size is None:
return slice(None)
elif isinstance(size, int):
rng = pm.tt_rng(seed)
Minibatch.RNG[id(self)].append(rng)
return (rng
.uniform(size=(size, ), low=0.0, high=pm.floatX(total) - 1e-16)
.astype('int64'))
else:
raise TypeError('Unrecognized size type, %r' % size)
def __del__(self):
del Minibatch.RNG[id(self)]
@staticmethod
def make_static_slices(user_size):
if user_size is None:
return [Ellipsis]
elif isinstance(user_size, int):
return slice(None, user_size)
elif isinstance(user_size, (list, tuple)):
slc = list()
for i in user_size:
if isinstance(i, int):
slc.append(i)
elif i is None:
slc.append(slice(None))
elif i is Ellipsis:
slc.append(Ellipsis)
elif isinstance(i, slice):
slc.append(i)
else:
raise TypeError('Unrecognized size type, %r' % user_size)
return slc
else:
raise TypeError('Unrecognized size type, %r' % user_size)
def make_random_slices(self, in_memory_shape, batch_size, default_random_seed):
if batch_size is None:
return [Ellipsis]
elif isinstance(batch_size, int):
slc = [self.rslice(in_memory_shape[0], batch_size, default_random_seed)]
elif isinstance(batch_size, (list, tuple)):
def check(t):
if t is Ellipsis or t is None:
return True
else:
if isinstance(t, (tuple, list)):
if not len(t) == 2:
return False
else:
return isinstance(t[0], int) and isinstance(t[1], int)
elif isinstance(t, int):
return True
else:
return False
# end check definition
if not all(check(t) for t in batch_size):
raise TypeError('Unrecognized `batch_size` type, expected '
'int or List[int|tuple(size, random_seed)] where '
'size and random seed are both ints, got %r' %
batch_size)
batch_size = [
(i, default_random_seed) if isinstance(i, int) else i
for i in batch_size
]
shape = in_memory_shape
if Ellipsis in batch_size:
sep = batch_size.index(Ellipsis)
begin = batch_size[:sep]
end = batch_size[sep + 1:]
if Ellipsis in end:
raise ValueError('Double Ellipsis in `batch_size` is restricted, got %r' %
batch_size)
if len(end) > 0:
shp_mid = shape[sep:-len(end)]
mid = [tt.arange(s) for s in shp_mid]
else:
mid = []
else:
begin = batch_size
end = []
mid = []
if (len(begin) + len(end)) > len(in_memory_shape.eval()):
raise ValueError('Length of `batch_size` is too big, '
'number of ints is bigger that ndim, got %r'
% batch_size)
if len(end) > 0:
shp_end = shape[-len(end):]
else:
shp_end = np.asarray([])
shp_begin = shape[:len(begin)]
slc_begin = [self.rslice(shp_begin[i], t[0], t[1])
if t is not None else tt.arange(shp_begin[i])
for i, t in enumerate(begin)]
slc_end = [self.rslice(shp_end[i], t[0], t[1])
if t is not None else tt.arange(shp_end[i])
for i, t in enumerate(end)]
slc = slc_begin + mid + slc_end
else:
raise TypeError('Unrecognized size type, %r' % batch_size)
return pm.theanof.ix_(*slc)
def update_shared(self):
if self.update_shared_f is None:
raise NotImplementedError("No `update_shared_f` was provided to `__init__`")
self.set_value(np.asarray(self.update_shared_f(), self.dtype))
def set_value(self, value):
self.shared.set_value(np.asarray(value, self.dtype))
def clone(self):
ret = self.type()
ret.name = self.name
ret.tag = copy(self.tag)
return ret
def align_minibatches(batches=None):
if batches is None:
for rngs in Minibatch.RNG.values():
for rng in rngs:
rng.seed()
else:
for b in batches:
if not isinstance(b, Minibatch):
                raise TypeError(f'{b} is not a Minibatch')
for rng in Minibatch.RNG[id(b)]:
rng.seed()
class Data:
"""Data container class that wraps the theano ``SharedVariable`` class
and lets the model be aware of its inputs and outputs.
Parameters
----------
name: str
The name for this variable
value
A value to associate with this variable
Examples
--------
>>> import pymc3 as pm
>>> import numpy as np
>>> # We generate 10 datasets
>>> true_mu = [np.random.randn() for _ in range(10)]
>>> observed_data = [mu + np.random.randn(20) for mu in true_mu]
>>> with pm.Model() as model:
... data = pm.Data('data', observed_data[0])
... mu = pm.Normal('mu', 0, 10)
... pm.Normal('y', mu=mu, sigma=1, observed=data)
>>> # Generate one trace for each dataset
>>> traces = []
>>> for data_vals in observed_data:
... with model:
... # Switch out the observed dataset
... pm.set_data({'data': data_vals})
... traces.append(pm.sample())
To set the value of the data container variable, check out
:func:`pymc3.model.set_data()`.
For more information, take a look at this example notebook
https://docs.pymc.io/notebooks/data_container.html
"""
def __new__(self, name, value):
if isinstance(value, list):
value = np.array(value)
# Add data container to the named variables of the model.
try:
model = pm.Model.get_context()
except TypeError:
raise TypeError(
"No model on context stack, which is needed to instantiate a data container. "
"Add variable inside a 'with model:' block."
)
name = model.name_for(name)
# `pm.model.pandas_to_array` takes care of parameter `value` and
# transforms it to something digestible for pymc3
shared_object = theano.shared(pm.model.pandas_to_array(value), name)
# To draw the node for this variable in the graphviz Digraph we need
# its shape.
shared_object.dshape = tuple(shared_object.shape.eval())
model.add_random_variable(shared_object)
return shared_object
the-stack_106_23920
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import animation
import numpy as np
fig, ax = plt.subplots(2, 2, figsize=(9, 8.5))
topend = 50
angle_list = []
int_angle = []
count = []
sidelengths = []
area_list = []
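# Added note (illustrative, not part of the original script): for a regular n-gon
# inscribed in a unit circle the interior angle is 180*(n-2)/n degrees, each side
# has length 2*cos(interior/2) = 2*sin(pi/n) and the apothem is sin(interior/2) =
# cos(pi/n), so the accumulated area n * (side/2) * apothem = (n/2)*sin(2*pi/n)
# approaches pi as n grows, which is what the loop below exploits.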
for x in range(3,topend): #finds the interior angle of a regular n-gon (where n = 3 to topend-1)
degrees = 180*(x-2)
angle = degrees/x
angle_list.append(angle)
if angle.is_integer(): #checks if angle is an integer
int_angle.append(angle)
count.append(x)
length = 2*np.cos(angle*np.pi/(180*2))
height = np.sin(angle*np.pi/(180*2))
sidelengths.append(length)
area = 2*x*(1/2 * length/2 * height)
area_list.append(area)
if x == topend-1: #approximates pi using the area of the largest n-gon
print('pi is approx: ' + str(area))
#creates circle
circle = mpatches.Circle([0.5,0.5], 0.5,
ec="none",
facecolor = 'r',
alpha = 0.7
)
#creates polygon (triangle)
polygon = mpatches.RegularPolygon([0.5,0.5],3,0.5)
#Creates each frame for bottom right graph
def update_vector(i, polygon):
ax[1,1].clear()
polygon = mpatches.RegularPolygon([0.5,0.5],i+3,
0.5,
alpha=0.5
)
ax[1,1].add_patch(circle)
ax[1,1].add_patch(polygon)
return polygon,
#hides x,y axis ticks for bottom right graph
ax[1,1].axes.xaxis.set_visible(False)
ax[1,1].axes.yaxis.set_visible(False)
#animates bottom right frame
anim = animation.FuncAnimation(fig, update_vector,
fargs = (polygon,),
frames=20, #sets max n-gon in animation
interval=650,
)
#creates each graph
ax[0,0].scatter(range(3,topend),angle_list)
ax[0,0].scatter(count,int_angle)
ax[0,0].set(xlabel='Number of Sides', ylabel= 'Degree of Angle')
ax[0,1].scatter(range(3,topend),area_list)
ax[0,1].set(xlabel='Number of Sides', ylabel= 'Area')
ax[1,0].scatter(range(3,topend),sidelengths)
ax[1,0].set(xlabel='Number of Sides', ylabel= 'Length of Side')
#sets title
fig.suptitle('Approaching the Area of a Circle', fontsize = 20)
plt.show()
# #**Uncomment to save file as mp4 or gif**
# import matplotlib
# matplotlib.use("Agg")
# # Set up formatting for the movie files
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=6, metadata=dict(artist='Me'), bitrate=1800)
# anim.save('app_circle_graphic.mp4', writer=writer) #save as mp4, requires ffmpeg
# anim.save('app_circle_graphic.gif', writer='imagemagick', fps=6) # save as gif requires imagemagick and wand
the-stack_106_23922
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateDataLabelingJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_async]
from google.cloud import aiplatform_v1beta1
async def sample_create_data_labeling_job():
"""Snippet for create_data_labeling_job"""
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
data_labeling_job = aiplatform_v1beta1.DataLabelingJob()
data_labeling_job.display_name = "display_name_value"
data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}"
data_labeling_job.labeler_count = 1375
data_labeling_job.instruction_uri = "instruction_uri_value"
data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
data_labeling_job.inputs.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateDataLabelingJobRequest(
parent="projects/{project}/locations/{location}",
data_labeling_job=data_labeling_job,
)
# Make the request
response = await client.create_data_labeling_job(request=request)
# Handle response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_async]
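# Added note (illustrative, not part of the generated snippet): to actually run
# this async sample, execute the coroutine with asyncio, e.g.
#
#     import asyncio
#     asyncio.run(sample_create_data_labeling_job())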
the-stack_106_23924
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import time
import sys
import logging
import paddle.fluid as fluid
from ....log_helper import get_logger
from .utils import load_variable_data, set_variable_data, stable_sigmoid, quant_tensor, dequant_tensor, _channelwise_quant_axis1_ops, calculate_quant_cos_error, bias_correction_w
_logger = get_logger(__name__,
logging.INFO,
fmt='%(asctime)s-%(levelname)s: %(message)s')
GAMMA = -0.1
ZETA = 1.1
def compute_soft_rounding(alpha_v):
return fluid.layers.clip(fluid.layers.sigmoid(alpha_v) * (ZETA - GAMMA) +
GAMMA,
min=0,
max=1)
def compute_soft_rounding_np(alpha_v):
return np.clip(stable_sigmoid(alpha_v) * (ZETA - GAMMA) + GAMMA,
a_min=0,
a_max=1)
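# Added note (illustrative sanity check, not part of the library): the rectified
# sigmoid stretches sigmoid(alpha) by (ZETA - GAMMA) = 1.2, shifts it by
# GAMMA = -0.1 and clips to [0, 1], e.g.
#
#     compute_soft_rounding_np(np.array(0.0))    # -> 0.5  (0.5 * 1.2 - 0.1)
#     compute_soft_rounding_np(np.array(10.0))   # -> 1.0  (clipped from ~1.1)
#     compute_soft_rounding_np(np.array(-10.0))  # -> 0.0  (clipped from ~-0.1)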
class AdaRoundLoss(object):
def __init__(self, reg_param=0.01, default_beta_range=(20, 2)):
self.default_reg_param = reg_param
self.default_beta_range = default_beta_range
def compute_recon_loss(self, ada_quantized_output, orig_output):
square_cost = fluid.layers.square_error_cost(ada_quantized_output,
orig_output)
recon_loss = fluid.layers.reduce_mean(
fluid.layers.reduce_sum(square_cost, dim=-1))
return recon_loss
def compute_round_loss(self, alpha_v, warm_start, beta):
def round_loss_fn():
# compute rectified sigmoid of parameter 'alpha' which maps it between zero and one
h_v = compute_soft_rounding(alpha_v)
            # calculate the regularization term, which pushes the parameter to converge
            # to exactly zeros and ones at the end of optimization
reg_term = fluid.layers.reduce_sum(
-fluid.layers.pow(fluid.layers.abs(2 * h_v - 1), factor=beta) +
1)
# calculate the rounding loss
round_loss = self.default_reg_param * reg_term
return round_loss
round_loss = fluid.layers.cond(
warm_start, lambda: fluid.layers.fill_constant(
shape=[1], dtype='float32', value=0.0), round_loss_fn)
return round_loss
def compute_beta(self, max_iter, cur_iter, warm_start):
# Start and stop beta for annealing of rounding loss (start_beta, end_beta)
start_beta, end_beta = self.default_beta_range
        # iteration at the end of the warm start period (the `warm_start` fraction of max iterations)
warm_start_end_iter = warm_start * max_iter
# compute relative iteration of current iteration
rel_iter = (cur_iter - warm_start_end_iter) / (max_iter -
warm_start_end_iter)
beta = end_beta + 0.5 * (start_beta -
end_beta) * (1 + np.cos(rel_iter * np.pi))
return beta
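# Added note (illustrative worked example): with max_iter=1000, warm_start=0.1 and
# the default beta range (20, 2), warm_start_end_iter = 100; at cur_iter=100,
# rel_iter = 0 and beta = 2 + 0.5*(20 - 2)*(1 + cos(0)) = 20, while at
# cur_iter=1000, rel_iter = 1 and beta = 2 + 0.5*(20 - 2)*(1 + cos(pi)) = 2,
# so beta anneals smoothly from 20 down to 2 after the warm-start period.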
class AdaRound(object):
def __init__(self,
scale,
weight_tensor,
scope=None,
weight_var_name=None,
weight_op_type=None,
is_train=True,
num_iterations=1000):
self.is_train = is_train
self.num_iterations = num_iterations
self.warm_start = 0.1
self.weight_bits = 8
self.offset = 0. # zero-point offset
self.adaround_loss = AdaRoundLoss()
self.ori_weight_tensor = weight_tensor
self.scale = scale
self.scope = scope
self.quant_axis = 0
if weight_op_type in _channelwise_quant_axis1_ops:
self.quant_axis = 1
self.weight_var_name = weight_var_name
self.alpha_name = weight_var_name + ".alpha"
self.initialize_alpha(weight_tensor.copy(), scale, weight_var_name)
def initialize_alpha(self, tensor, scale, var_name):
"""
Initializes alpha parameter, same shape as the weight tensor
"""
tensor_scale = quant_tensor(tensor, scale, quant_axis=self.quant_axis)
tensor_floor = np.floor(tensor_scale)
tensor = tensor_scale - tensor_floor
alpha = -np.log((ZETA - GAMMA) / (tensor - GAMMA) - 1)
self.alpha_v = fluid.layers.create_parameter(
shape=alpha.shape,
dtype="float32",
name=var_name + ".alpha",
default_initializer=fluid.initializer.NumpyArrayInitializer(alpha))
def _calculate_output_with_adarounded_weights(self, program, place, exe,
data, fp32_fetch_list,
weight_tensor_dequant):
set_variable_data(self.scope, place, self.weight_var_name,
weight_tensor_dequant)
adaround_out_tensor = exe.run(program=program,
feed=data,
fetch_list=[fp32_fetch_list],
return_numpy=True,
scope=self.scope)
return adaround_out_tensor
def _calculate_quant_weight(self):
np_alpha = load_variable_data(self.scope, self.alpha_name)
h_alpha = compute_soft_rounding_np(np_alpha)
# Scale the tensor
tensor_scale = quant_tensor(self.ori_weight_tensor.copy(),
self.scale,
quant_axis=self.quant_axis)
weight_tensor = np.floor(tensor_scale)
# Adaround the tensor
weight_tensor_quant = np.add(weight_tensor, h_alpha)
return weight_tensor_quant
def _calculate_adarounded_weights(self):
weight_tensor_quant = self._calculate_quant_weight()
# Dequantize the tensor
weight_tensor_dequant = dequant_tensor(weight_tensor_quant +
self.offset,
self.scale,
quant_axis=self.quant_axis)
return weight_tensor_dequant
def update_final_weights(self):
weight_tensor_quant = self._calculate_quant_weight()
return weight_tensor_quant
def get_loss(self, beta, warm_start, adaround_out_tensor, orig_out_tensor):
round_loss = self.adaround_loss.compute_round_loss(
self.alpha_v, warm_start, beta)
recon_loss = self.adaround_loss.compute_recon_loss(
adaround_out_tensor, orig_out_tensor)
loss = round_loss + recon_loss
losses = {
'loss': loss,
'round_loss': round_loss,
'recon_loss': recon_loss
}
return losses
def update_beta_warm(self, cur_iteration):
warm_start = cur_iteration < self.num_iterations * self.warm_start
beta = self.adaround_loss.compute_beta(self.num_iterations,
cur_iteration, self.warm_start)
return beta, warm_start
def run_adaround(data_loader,
fp32_program,
fetch_list,
exe,
scope,
place,
quantized_op_pairs,
weight_op_pairs,
scale_dict,
num_iterations=1000,
lr=0.001,
bias_correction=False,
fast_mode=True):
fetch_op_name = fetch_list[0].name
final_weight_tensor_quant_dict = {}
for weight_var_name, quant_op_out_name in quantized_op_pairs.items():
_logger.info('Start adaround op: {}'.format(weight_var_name))
weight_op_type = weight_op_pairs[weight_var_name]
# get scale and weight tensor
weight_var_tensor = load_variable_data(scope, weight_var_name)
scale = scale_dict[weight_var_name]
fp32_fetch_list = None
for _op in fp32_program.global_block().ops:
if _op.type == "fetch":
_op._rename_input(fetch_op_name, quant_op_out_name)
fp32_fetch_list = fp32_program.global_block().var(
quant_op_out_name)
fetch_op_name = quant_op_out_name
# build adaround program
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_iteration_per_drop_scope = 1
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
# initialize adaround
adaround = AdaRound(scale,
weight_var_tensor,
scope=scope,
weight_var_name=weight_var_name,
weight_op_type=weight_op_type,
num_iterations=num_iterations)
orig_out_tensor = fluid.data(name='orig_out_tensor',
shape=fp32_fetch_list.shape,
dtype='float32')
adaround_out_tensor = fluid.data(name='adaround_out_tensor',
shape=fp32_fetch_list.shape,
dtype='float32')
beta_tensor = fluid.data(name='beta',
shape=[1],
dtype='float32')
warm_start_tensor = fluid.data(name='warm_start',
shape=[1],
dtype='bool')
train_fetches_loss = adaround.get_loss(beta_tensor,
warm_start_tensor,
adaround_out_tensor,
orig_out_tensor)
optimizer = fluid.optimizer.Adam(learning_rate=lr)
loss = train_fetches_loss['loss']
optimizer.minimize(loss)
exe.run(startup_program)
start_time = time.time()
prev_start_time = start_time
for i, data in enumerate(data_loader()):
prev_start_time = start_time
start_time = time.time()
# run fp32 model
np_orig_out_tensor = exe.run(program=fp32_program,
feed=data,
fetch_list=[fp32_fetch_list],
return_numpy=True,
scope=scope)
adaround_weight_tensor_dequant = adaround._calculate_adarounded_weights(
)
np_adaround_out_tensor = adaround._calculate_output_with_adarounded_weights(
fp32_program, place, exe, data, fp32_fetch_list,
adaround_weight_tensor_dequant)
            # If the cosine similarity of the two tensors is high, skip training
cos_error = calculate_quant_cos_error(np_orig_out_tensor[0],
np_adaround_out_tensor[0])
if fast_mode and cos_error > 0.99:
_logger.info("The cosine error is small, skip training.")
break
beta, warm_start = adaround.update_beta_warm(i)
feed_dict = {
'orig_out_tensor': np_orig_out_tensor[0],
'adaround_out_tensor': np_adaround_out_tensor[0],
'beta': beta,
'warm_start': warm_start
}
out = exe.run(
train_program,
feed=feed_dict,
fetch_list=[v.name for v in train_fetches_loss.values()],
return_numpy=True)
_logger.info(
"Iter {:d}, lr {:.5f}, loss {:.5f}, loss_round {:.5f}, loss_recon {:.5f}, time {:.5f}s"
.format(i, lr, np.mean(out[0]), np.mean(out[1]),
np.mean(out[2]), start_time - prev_start_time))
sys.stdout.flush()
if i == num_iterations:
break
final_weight_tensor_quant_dict[
weight_var_name] = adaround.update_final_weights()
if bias_correction:
final_weight_tensor_quant_dict[weight_var_name] = bias_correction_w(
weight_var_tensor,
final_weight_tensor_quant_dict[weight_var_name],
scale,
adaround.quant_axis,
weight_bits=adaround.weight_bits)
del adaround
# update adarounded calibrated weights
for weight_var_name in quantized_op_pairs.keys():
set_variable_data(scope, place, weight_var_name,
final_weight_tensor_quant_dict[weight_var_name])
the-stack_106_23926
import typing
from qtpy.QtCore import QPoint, QRectF, QSize, QSizeF, Qt
from qtpy.QtGui import QCursor, QPainter
from qtpy.QtWidgets import (QGraphicsDropShadowEffect, QGraphicsItem,
QGraphicsObject, QGraphicsProxyWidget,
QGraphicsSceneContextMenuEvent,
QGraphicsSceneHoverEvent, QGraphicsSceneMouseEvent,
QSizePolicy, QStyleOptionGraphicsItem, QWidget)
from .enums import ConnectionPolicy
from .node_connection_interaction import NodeConnectionInteraction
from .port import PortType
class NodeGraphicsObject(QGraphicsObject):
def __init__(self, scene, node):
super().__init__()
self._scene = scene
self._node = node
self._locked = False
self._proxy_widget = None
self._scene.addItem(self)
self.setFlag(QGraphicsItem.ItemDoesntPropagateOpacityToChildren, True)
self.setFlag(QGraphicsItem.ItemIsMovable, True)
self.setFlag(QGraphicsItem.ItemIsFocusable, True)
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, True)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
self._style = node.model.style
node_style = self._style.node
effect = QGraphicsDropShadowEffect()
effect.setOffset(4, 4)
effect.setBlurRadius(20)
effect.setColor(node_style.shadow_color)
self.setGraphicsEffect(effect)
self.setOpacity(node_style.opacity)
self.setAcceptHoverEvents(True)
self.setZValue(0)
self.embed_q_widget()
# connect to the move signals to emit the move signals in FlowScene
def on_move():
self._scene.node_moved.emit(self._node, self.pos())
self.xChanged.connect(on_move)
self.yChanged.connect(on_move)
def _cleanup(self):
if self._scene is not None:
self._scene.removeItem(self)
self._scene = None
def __del__(self):
try:
self._cleanup()
except Exception:
...
def setPos(self, pos):
super().setPos(pos)
self.move_connections()
@property
def node(self):
"""
Node
Returns
-------
value : Node
"""
return self._node
def boundingRect(self) -> QRectF:
"""
boundingRect
Returns
-------
value : QRectF
"""
return self._node.geometry.bounding_rect
def set_geometry_changed(self):
self.prepareGeometryChange()
def move_connections(self):
"""
Visits all attached connections and corrects their corresponding end points.
"""
for conn in self._node.state.all_connections:
conn.graphics_object.move()
def lock(self, locked: bool):
"""
Lock
Parameters
----------
locked : bool
"""
self._locked = locked
self.setFlag(QGraphicsItem.ItemIsMovable, not locked)
self.setFlag(QGraphicsItem.ItemIsFocusable, not locked)
self.setFlag(QGraphicsItem.ItemIsSelectable, not locked)
def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget):
"""
Paint
Parameters
----------
painter : QPainter
option : QStyleOptionGraphicsItem
widget : QWidget
"""
from .node_painter import NodePainter
# TODO
painter.setClipRect(option.exposedRect)
NodePainter.paint(painter, self._node, self._scene,
node_style=self._style.node,
connection_style=self._style.connection,
)
def itemChange(self, change: QGraphicsItem.GraphicsItemChange, value: typing.Any) -> typing.Any:
"""
itemChange
Parameters
----------
change : QGraphicsItem.GraphicsItemChange
value : any
Returns
-------
value : any
"""
if change == self.ItemPositionChange and self.scene():
self.move_connections()
return super().itemChange(change, value)
def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
"""
mousePressEvent
Parameters
----------
event : QGraphicsSceneMouseEvent
"""
if self._locked:
return
        # deselect all other items after this one is selected
if not self.isSelected() and not (event.modifiers() & Qt.ControlModifier):
self._scene.clearSelection()
node_geometry = self._node.geometry
for port_to_check in (PortType.input, PortType.output):
# TODO do not pass sceneTransform
port = node_geometry.check_hit_scene_point(port_to_check,
event.scenePos(),
self.sceneTransform())
if not port:
continue
connections = port.connections
# start dragging existing connection
if connections and port_to_check == PortType.input:
conn, = connections
interaction = NodeConnectionInteraction(self._node, conn, self._scene)
interaction.disconnect(port_to_check)
elif port_to_check == PortType.output:
# initialize new Connection
out_policy = port.connection_policy
if connections and out_policy == ConnectionPolicy.one:
conn, = connections
self._scene.delete_connection(conn)
# TODO_UPSTREAM: add to FlowScene
connection = self._scene.create_connection(port)
connection.graphics_object.grabMouse()
pos = QPoint(event.pos().x(), event.pos().y())
geom = self._node.geometry
state = self._node.state
if self._node.model.resizable() and geom.resize_rect.contains(pos):
state.resizing = True
def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
"""
mouseMoveEvent
Parameters
----------
event : QGraphicsSceneMouseEvent
"""
geom = self._node.geometry
state = self._node.state
if state.resizing:
diff = event.pos() - event.lastPos()
w = self._node.model.embedded_widget()
if w:
self.prepareGeometryChange()
old_size = w.size() + QSize(diff.x(), diff.y())
w.setFixedSize(old_size)
old_size_f = QSizeF(old_size)
self._proxy_widget.setMinimumSize(old_size_f)
self._proxy_widget.setMaximumSize(old_size_f)
self._proxy_widget.setPos(geom.widget_position)
geom.recalculate_size()
self.update()
self.move_connections()
event.accept()
else:
super().mouseMoveEvent(event)
if event.lastPos() != event.pos():
self.move_connections()
event.ignore()
bounding = self.mapToScene(self.boundingRect()).boundingRect()
r = self.scene().sceneRect().united(bounding)
self.scene().setSceneRect(r)
def mouseReleaseEvent(self, event: QGraphicsSceneMouseEvent):
"""
mouseReleaseEvent
Parameters
----------
event : QGraphicsSceneMouseEvent
"""
state = self._node.state
state.resizing = False
super().mouseReleaseEvent(event)
# position connections precisely after fast node move
self.move_connections()
def hoverEnterEvent(self, event: QGraphicsSceneHoverEvent):
"""
hoverEnterEvent
Parameters
----------
event : QGraphicsSceneHoverEvent
"""
# bring all the colliding nodes to background
overlap_items = self.collidingItems()
for item in overlap_items:
if item.zValue() > 0.0:
item.setZValue(0.0)
# bring self node forward
self.setZValue(1.0)
self._node.geometry.hovered = True
self.update()
self._scene.node_hovered.emit(self._node, event.screenPos())
event.accept()
def hoverLeaveEvent(self, event: QGraphicsSceneHoverEvent):
"""
hoverLeaveEvent
Parameters
----------
event : QGraphicsSceneHoverEvent
"""
self._node.geometry.hovered = False
self.update()
self._scene.node_hover_left.emit(self._node)
event.accept()
def hoverMoveEvent(self, event: QGraphicsSceneHoverEvent):
"""
hoverMoveEvent
Parameters
----------
        event : QGraphicsSceneHoverEvent
"""
pos = event.pos()
geom = self._node.geometry
if (self._node.model.resizable() and
geom.resize_rect.contains(QPoint(pos.x(), pos.y()))):
self.setCursor(QCursor(Qt.SizeFDiagCursor))
else:
self.setCursor(QCursor())
event.accept()
def mouseDoubleClickEvent(self, event: QGraphicsSceneMouseEvent):
"""
mouseDoubleClickEvent
Parameters
----------
event : QGraphicsSceneMouseEvent
"""
super().mouseDoubleClickEvent(event)
self._scene.node_double_clicked.emit(self._node)
def contextMenuEvent(self, event: QGraphicsSceneContextMenuEvent):
"""
contextMenuEvent
Parameters
----------
event : QGraphicsSceneContextMenuEvent
"""
self._scene.node_context_menu.emit(
self._node, event.scenePos(), event.screenPos())
def embed_q_widget(self):
geom = self._node.geometry
widget = self._node.model.embedded_widget()
if widget is None:
return
self._proxy_widget = QGraphicsProxyWidget(self)
self._proxy_widget.setWidget(widget)
self._proxy_widget.setPreferredWidth(5)
geom.recalculate_size()
# If the widget wants to use as much vertical space as possible, set it
        # to have the geometry's equivalent_widget_height.
if widget.sizePolicy().verticalPolicy() & QSizePolicy.ExpandFlag:
self._proxy_widget.setMinimumHeight(geom.equivalent_widget_height())
self._proxy_widget.setPos(geom.widget_position)
self.update()
self._proxy_widget.setOpacity(1.0)
self._proxy_widget.setFlag(QGraphicsItem.ItemIgnoresParentOpacity)
the-stack_106_23928
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
import urllib.parse
from opentelemetry import baggage
from opentelemetry.context import get_current
from opentelemetry.context.context import Context
from opentelemetry.propagators import textmap
class W3CBaggagePropagator(textmap.TextMapPropagator):
"""Extracts and injects Baggage which is used to annotate telemetry."""
_MAX_HEADER_LENGTH = 8192
_MAX_PAIR_LENGTH = 4096
_MAX_PAIRS = 180
_BAGGAGE_HEADER_NAME = "baggage"
def extract(
self,
carrier: textmap.CarrierT,
context: typing.Optional[Context] = None,
getter: textmap.Getter = textmap.default_getter,
) -> Context:
"""Extract Baggage from the carrier.
See
`opentelemetry.propagators.textmap.TextMapPropagator.extract`
"""
if context is None:
context = get_current()
header = _extract_first_element(
getter.get(carrier, self._BAGGAGE_HEADER_NAME)
)
if not header or len(header) > self._MAX_HEADER_LENGTH:
return context
baggage_entries = header.split(",")
total_baggage_entries = self._MAX_PAIRS
for entry in baggage_entries:
if total_baggage_entries <= 0:
return context
total_baggage_entries -= 1
if len(entry) > self._MAX_PAIR_LENGTH:
continue
try:
name, value = entry.split("=", 1)
except Exception: # pylint: disable=broad-except
continue
context = baggage.set_baggage(
urllib.parse.unquote(name).strip(),
urllib.parse.unquote(value).strip(),
context=context,
)
return context
def inject(
self,
carrier: textmap.CarrierT,
context: typing.Optional[Context] = None,
setter: textmap.Setter = textmap.default_setter,
) -> None:
"""Injects Baggage into the carrier.
See
`opentelemetry.propagators.textmap.TextMapPropagator.inject`
"""
baggage_entries = baggage.get_all(context=context)
if not baggage_entries:
return
baggage_string = _format_baggage(baggage_entries)
setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)
@property
def fields(self) -> typing.Set[str]:
"""Returns a set with the fields set in `inject`."""
return {self._BAGGAGE_HEADER_NAME}
def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
return ",".join(
key + "=" + urllib.parse.quote_plus(str(value))
for key, value in baggage_entries.items()
)
def _extract_first_element(
items: typing.Optional[typing.Iterable[textmap.CarrierT]],
) -> typing.Optional[textmap.CarrierT]:
if items is None:
return None
return next(iter(items), None)
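# Added usage sketch (illustrative, not part of the module): with the default
# getter and setter a plain dict can serve as the carrier.
#
#     propagator = W3CBaggagePropagator()
#     ctx = propagator.extract({"baggage": "userId=alice,serverNode=DF%2028"})
#     # baggage.get_all(ctx) now contains {'userId': 'alice', 'serverNode': 'DF 28'}
#     carrier = {}
#     propagator.inject(carrier, context=ctx)
#     # carrier is now {'baggage': 'userId=alice,serverNode=DF+28'}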
the-stack_106_23930
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leonardo_form_pegastudio', '0031_auto_20180214_2209'),
]
operations = [
migrations.AlterField(
model_name='pegastudioproducts',
name='file',
field=models.FileField(upload_to=b'documents/%Y/%m/%d/', verbose_name='Soubor'),
),
]
the-stack_106_23932
import collections
import torch
from .. import dndarray, tiling
from .. import factories
__all__ = ["qr"]
def qr(a, tiles_per_proc=1, calc_q=True, overwrite_a=False):
"""
Calculates the QR decomposition of a 2D DNDarray.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is upper-triangular.
Parameters
----------
a : DNDarray
DNDarray which will be decomposed
    tiles_per_proc : int, single element torch.Tensor
optional, default: 1
number of tiles per process to operate on
calc_q : bool
optional, default: True
whether or not to calculate Q
if True, function returns (Q, R)
if False, function returns (None, R)
overwrite_a : bool
optional, default: False
if True, function overwrites the DNDarray a, with R
if False, a new array will be created for R
Returns
-------
namedtuple of Q and R
if calc_q == True, function returns QR(Q=Q, R=R)
if calc_q == False, function returns QR(Q=None, R=R)
Notes
-----
    This function is built on top of PyTorch's QR function, torch.qr(), which uses LAPACK on the backend.
Basic information about QR factorization/decomposition can be found at
https://en.wikipedia.org/wiki/QR_factorization
    The algorithms are based on the CAQR and TSQR algorithms. For more information see the references.
References
----------
[0] W. Zheng, F. Song, L. Lin, and Z. Chen, “Scaling Up Parallel Computation of Tiled QR
Factorizations by a Distributed Scheduling Runtime System and Analytical Modeling,”
Parallel Processing Letters, vol. 28, no. 01, p. 1850004, 2018.
[1] Bilel Hadri, Hatem Ltaief, Emmanuel Agullo, Jack Dongarra. Tile QR Factorization with
    Parallel Panel Processing for Multicore Architectures. 24th IEEE International Parallel
    and Distributed Processing Symposium (IPDPS 2010), Apr 2010, Atlanta, United States.
inria-00548899
[2] Gene H. Golub and Charles F. Van Loan. 1996. Matrix Computations (3rd Ed.).
Examples
--------
>>> a = ht.random.randn(9, 6, split=0)
>>> qr = ht.linalg.qr(a)
>>> print(ht.allclose(a, ht.dot(qr.Q, qr.R)))
[0/1] True
[1/1] True
>>> st = torch.randn(9, 6)
>>> a = ht.array(st, split=1)
>>> a_comp = ht.array(st, split=0)
>>> q, r = ht.linalg.qr(a)
>>> print(ht.allclose(a_comp, ht.dot(q, r)))
[0/1] True
[1/1] True
"""
if not isinstance(a, dndarray.DNDarray):
raise TypeError("'a' must be a DNDarray")
if not isinstance(tiles_per_proc, (int, torch.Tensor)):
raise TypeError(
"tiles_per_proc must be an int or a torch.Tensor, "
"currently {}".format(type(tiles_per_proc))
)
if not isinstance(calc_q, bool):
raise TypeError("calc_q must be a bool, currently {}".format(type(calc_q)))
if not isinstance(overwrite_a, bool):
raise TypeError("overwrite_a must be a bool, currently {}".format(type(overwrite_a)))
if isinstance(tiles_per_proc, torch.Tensor):
raise ValueError(
"tiles_per_proc must be a single element torch.Tenor or int, "
"currently has {} entries".format(tiles_per_proc.numel())
)
if len(a.shape) != 2:
raise ValueError("Array 'a' must be 2 dimensional")
QR = collections.namedtuple("QR", "Q, R")
if a.split is None:
q, r = a._DNDarray__array.qr(some=False)
q = factories.array(q, device=a.device)
r = factories.array(r, device=a.device)
ret = QR(q if calc_q else None, r)
return ret
# =============================== Prep work ====================================================
r = a if overwrite_a else a.copy()
# r.create_square_diag_tiles(tiles_per_proc=tiles_per_proc)
r_tiles = tiling.SquareDiagTiles(arr=r, tiles_per_proc=tiles_per_proc)
tile_columns = r_tiles.tile_columns
tile_rows = r_tiles.tile_rows
if calc_q:
q = factories.eye(
(r.gshape[0], r.gshape[0]), split=0, dtype=r.dtype, comm=r.comm, device=r.device
)
q_tiles = tiling.SquareDiagTiles(arr=q, tiles_per_proc=tiles_per_proc)
q_tiles.match_tiles(r_tiles)
else:
q, q_tiles = None, None
# ==============================================================================================
if a.split == 0:
rank = r.comm.rank
active_procs = torch.arange(r.comm.size, device=r.device.torch_device)
empties = torch.nonzero(input=r_tiles.lshape_map[..., 0] == 0, as_tuple=False)
empties = empties[0] if empties.numel() > 0 else []
for e in empties:
active_procs = active_procs[active_procs != e]
tile_rows_per_pr_trmd = r_tiles.tile_rows_per_process[: active_procs[-1] + 1]
q_dict = {}
q_dict_waits = {}
proc_tile_start = torch.cumsum(
torch.tensor(tile_rows_per_pr_trmd, device=r.device.torch_device), dim=0
)
# ------------------------------------ R Calculation ---------------------------------------
for col in range(
tile_columns
): # for each tile column (need to do the last rank separately)
# for each process need to do local qr
not_completed_processes = torch.nonzero(
input=col < proc_tile_start, as_tuple=False
).flatten()
if rank not in not_completed_processes or rank not in active_procs:
                # if the process is done calculating R then break the loop
break
diag_process = not_completed_processes[0]
__split0_r_calc(
r_tiles=r_tiles,
q_dict=q_dict,
q_dict_waits=q_dict_waits,
col_num=col,
diag_pr=diag_process,
not_completed_prs=not_completed_processes,
)
# ------------------------------------- Q Calculation --------------------------------------
if calc_q:
for col in range(tile_columns):
__split0_q_loop(
col=col,
r_tiles=r_tiles,
proc_tile_start=proc_tile_start,
active_procs=active_procs,
q0_tiles=q_tiles,
q_dict=q_dict,
q_dict_waits=q_dict_waits,
)
elif a.split == 1:
# loop over the tile columns
lp_cols = tile_columns if a.gshape[0] > a.gshape[1] else tile_rows
for dcol in range(lp_cols): # dcol is the diagonal column
__split1_qr_loop(dcol=dcol, r_tiles=r_tiles, q0_tiles=q_tiles, calc_q=calc_q)
r.balance_()
if q is not None:
q.balance_()
ret = QR(q, r)
return ret
def __split0_global_q_dict_set(q_dict_col, col, r_tiles, q_tiles, global_merge_dict=None):
"""
The function takes the original Q tensors from the global QR calculation and sets them to
    the keys which correspond with their tile coordinates in Q. This returns a separate dictionary;
    it does NOT set the values of Q
Parameters
----------
q_dict_col : Dict
The dictionary of the Q values for a given column, should be given as q_dict[col]
col : int, single element torch.Tensor
current column for which Q is being calculated for
r_tiles : tiling.SquareDiagTiles
tiling object for 'r'
q_tiles : tiling.SquareDiagTiles
tiling object for Q0
global_merge_dict : Dict, optional
the output of the function will be in this dictionary
Form of output: key index : torch.Tensor
Returns
-------
None
"""
    # q is already created, the job of this function is to group the merging q's together
    # it takes the merge qs, splits them, then puts them into a new dictionary
proc_tile_start = torch.cumsum(
torch.tensor(r_tiles.tile_rows_per_process, device=r_tiles.arr._DNDarray__array.device),
dim=0,
)
diag_proc = torch.nonzero(input=proc_tile_start > col, as_tuple=False)[0].item()
proc_tile_start = torch.cat(
(torch.tensor([0], device=r_tiles.arr._DNDarray__array.device), proc_tile_start[:-1]), dim=0
)
# 1: create caqr dictionary
# need to have empty lists for all tiles in q
global_merge_dict = {} if global_merge_dict is None else global_merge_dict
# intended to be used as [row][column] -> data
# 2: loop over keys in the dictionary
merge_list = list(q_dict_col.keys())
merge_list.sort()
# todo: possible improvement -> make the keys have the process they are on as well,
# then can async get them if they are not on the diagonal process
for key in merge_list:
# this loops over all of the Qs for col and creates the dictionary for the pr Q merges
p0 = key.find("p0")
p1 = key.find("p1")
end = key.find("e")
r0 = int(key[p0 + 2 : p1])
r1 = int(key[p1 + 2 : end])
lp_q = q_dict_col[key][0]
base_size = q_dict_col[key][1]
# cut the q into 4 bits (end of base array)
# todo: modify this so that it will get what is needed from the process,
# instead of gathering all the qs
top_left = lp_q[: base_size[0], : base_size[0]]
top_right = lp_q[: base_size[0], base_size[0] :]
bottom_left = lp_q[base_size[0] :, : base_size[0]]
bottom_right = lp_q[base_size[0] :, base_size[0] :]
# need to adjust the keys to be the global row
if diag_proc == r0:
col1 = col
else:
col1 = proc_tile_start[r0].item()
col2 = proc_tile_start[r1].item()
        # col1 and col2 are the column numbers
        # r0 and r1 are the ranks
jdim = (col1, col1)
kdim = (col1, col2)
ldim = (col2, col1)
mdim = (col2, col2)
# if there are no elements on that location than set it as the tile
# 1. get keys of what already has data
curr_keys = set(global_merge_dict.keys())
# 2. determine which tiles need to be touched/created
# these are the keys which are to be multiplied by the q in the current loop
# for matrix of form: | J K |
# | L M |
mult_keys_00 = [(i, col1) for i in range(q_tiles.tile_columns)] # (J)
# (J) -> inds: (i, col0)(col0, col0) -> set at (i, col0)
mult_keys_01 = [(i, col1) for i in range(q_tiles.tile_columns)] # (K)
# (K) -> inds: (i, col0)(col0, col1) -> set at (i, col1)
mult_keys_10 = [(i, col2) for i in range(q_tiles.tile_columns)] # (L)
# (L) -> inds: (i, col1)(col1, col0) -> set at (i, col0)
mult_keys_11 = [(i, col2) for i in range(q_tiles.tile_columns)] # (M)
# (M) -> inds: (i, col1)(col1, col1) -> set at (i, col1)
# if there are no elements in the mult_keys then set the element to the same place
s00 = set(mult_keys_00) & curr_keys
s01 = set(mult_keys_01) & curr_keys
s10 = set(mult_keys_10) & curr_keys
s11 = set(mult_keys_11) & curr_keys
hold_dict = global_merge_dict.copy()
# (J)
if not len(s00):
global_merge_dict[jdim] = top_left
else: # -> do the mm for all of the mult keys
for k in s00:
global_merge_dict[k[0], jdim[1]] = hold_dict[k] @ top_left
# (K)
if not len(s01):
# check that we are not overwriting here
global_merge_dict[kdim] = top_right
else: # -> do the mm for all of the mult keys
for k in s01:
global_merge_dict[k[0], kdim[1]] = hold_dict[k] @ top_right
# (L)
if not len(s10):
# check that we are not overwriting here
global_merge_dict[ldim] = bottom_left
else: # -> do the mm for all of the mult keys
for k in s10:
global_merge_dict[k[0], ldim[1]] = hold_dict[k] @ bottom_left
# (M)
if not len(s11):
# check that we are not overwriting here
global_merge_dict[mdim] = bottom_right
else: # -> do the mm for all of the mult keys
for k in s11:
global_merge_dict[k[0], mdim[1]] = hold_dict[k] @ bottom_right
return global_merge_dict
def __split0_r_calc(r_tiles, q_dict, q_dict_waits, col_num, diag_pr, not_completed_prs):
"""
Function to do the QR calculations to calculate the global R of the array `a`.
This function uses a binary merge structure in the globabl R merge.
Parameters
----------
r_tiles : tiling.SquareDiagTiles
tiling object for 'r'
q_dict : Dict
dictionary to save the calculated Q matrices to
q_dict_waits : Dict
dictionary to save the calculated Q matrices to which are
not calculated on the diagonal process
col_num : int
        the current column of the R calculation
diag_pr : int
rank of the process which has the tile which lies along the diagonal
not_completed_prs : torch.Tensor
tensor of the processes which have not yet finished calculating R
Returns
-------
None
"""
tile_rows_proc = r_tiles.tile_rows_per_process
comm = r_tiles.arr.comm
rank = comm.rank
lcl_tile_row = 0 if rank != diag_pr else col_num - sum(tile_rows_proc[:rank])
# only work on the processes which have not computed the final result
q_dict[col_num] = {}
q_dict_waits[col_num] = {}
# --------------- local QR calc -----------------------------------------------------
base_tile = r_tiles.local_get(key=(slice(lcl_tile_row, None), col_num))
q1, r1 = base_tile.qr(some=False)
q_dict[col_num]["l0"] = [q1, base_tile.shape]
r_tiles.local_set(key=(slice(lcl_tile_row, None), col_num), value=r1)
if col_num != r_tiles.tile_columns - 1:
base_rest = r_tiles.local_get((slice(lcl_tile_row, None), slice(col_num + 1, None)))
loc_rest = torch.matmul(q1.T, base_rest)
r_tiles.local_set(key=(slice(lcl_tile_row, None), slice(col_num + 1, None)), value=loc_rest)
# --------------- global QR calc (binary merge) -------------------------------------
rem1 = None
rem2 = None
offset = not_completed_prs[0]
loop_size_remaining = not_completed_prs.clone()
completed = False if loop_size_remaining.size()[0] > 1 else True
procs_remaining = loop_size_remaining.size()[0]
loop = 0
while not completed:
if procs_remaining % 2 == 1:
# if the number of processes active is odd need to save the remainders
if rem1 is None:
rem1 = loop_size_remaining[-1]
loop_size_remaining = loop_size_remaining[:-1]
elif rem2 is None:
rem2 = loop_size_remaining[-1]
loop_size_remaining = loop_size_remaining[:-1]
if rank not in loop_size_remaining and rank not in [rem1, rem2]:
break # if the rank is done then exit the loop
# send the data to the corresponding processes
half_prs_rem = torch.floor_divide(procs_remaining, 2)
zipped = zip(
loop_size_remaining.flatten()[:half_prs_rem],
loop_size_remaining.flatten()[half_prs_rem:],
)
for pr in zipped:
pr0, pr1 = int(pr[0].item()), int(pr[1].item())
__split0_merge_tile_rows(
pr0=pr0,
pr1=pr1,
column=col_num,
rank=rank,
r_tiles=r_tiles,
diag_process=diag_pr,
key=str(loop) + "p0" + str(pr0) + "p1" + str(pr1) + "e",
q_dict=q_dict,
)
__split0_send_q_to_diag_pr(
col=col_num,
pr0=pr0,
pr1=pr1,
diag_process=diag_pr,
comm=comm,
q_dict=q_dict,
key=str(loop) + "p0" + str(pr0) + "p1" + str(pr1) + "e",
q_dict_waits=q_dict_waits,
q_dtype=r_tiles.arr.dtype.torch_type(),
q_device=r_tiles.arr._DNDarray__array.device,
)
loop_size_remaining = loop_size_remaining[: -1 * (half_prs_rem)]
procs_remaining = loop_size_remaining.size()[0]
if rem1 is not None and rem2 is not None:
# combine rem1 and rem2 in the same way as the other nodes,
# then save the results in rem1 to be used later
__split0_merge_tile_rows(
pr0=rem2,
pr1=rem1,
column=col_num,
rank=rank,
r_tiles=r_tiles,
diag_process=diag_pr,
key=str(loop) + "p0" + str(int(rem1)) + "p1" + str(int(rem2)) + "e",
q_dict=q_dict if q_dict is not None else {},
)
rem1, rem2 = int(rem1), int(rem2)
__split0_send_q_to_diag_pr(
col=col_num,
pr0=rem2,
pr1=rem1,
diag_process=diag_pr,
key=str(loop) + "p0" + str(int(rem1)) + "p1" + str(int(rem2)) + "e",
q_dict=q_dict if q_dict is not None else {},
comm=comm,
q_dict_waits=q_dict_waits,
q_dtype=r_tiles.arr.dtype.torch_type(),
q_device=r_tiles.arr._DNDarray__array.device,
)
rem1 = rem2
rem2 = None
loop += 1
if rem1 is not None and rem2 is None and procs_remaining == 1:
# combine rem1 with process 0 (offset) and set completed to True
# this should be the last thing that happens
__split0_merge_tile_rows(
pr0=offset,
pr1=rem1,
column=col_num,
rank=rank,
r_tiles=r_tiles,
diag_process=diag_pr,
key=str(loop) + "p0" + str(int(offset)) + "p1" + str(int(rem1)) + "e",
q_dict=q_dict,
)
offset, rem1 = int(offset), int(rem1)
__split0_send_q_to_diag_pr(
col=col_num,
pr0=offset,
pr1=rem1,
diag_process=diag_pr,
key=str(loop) + "p0" + str(int(offset)) + "p1" + str(int(rem1)) + "e",
q_dict=q_dict,
comm=comm,
q_dict_waits=q_dict_waits,
q_dtype=r_tiles.arr.dtype.torch_type(),
q_device=r_tiles.arr._DNDarray__array.device,
)
rem1 = None
completed = True if procs_remaining == 1 and rem1 is None and rem2 is None else False
def __split0_merge_tile_rows(pr0, pr1, column, rank, r_tiles, diag_process, key, q_dict):
"""
Merge two tile rows, take their QR, and apply it to the trailing process
This will modify 'a' and set the value of the q_dict[column][key]
with [Q, upper.shape, lower.shape].
Parameters
----------
pr0, pr1 : int, int
Process ranks of the processes to be used
column : int
        the current column of the QR calculation
rank : int
the rank of the process
r_tiles : ht.tiles.SquareDiagTiles
tiling object used for getting/setting the tiles required
diag_process : int
The rank of the process which has the tile along the diagonal for the given column
Returns
-------
None, sets the value of q_dict[column][key] with [Q, upper.shape, lower.shape]
"""
if rank not in [pr0, pr1]:
return
pr0 = pr0.item() if isinstance(pr0, torch.Tensor) else pr0
pr1 = pr1.item() if isinstance(pr1, torch.Tensor) else pr1
comm = r_tiles.arr.comm
upper_row = sum(r_tiles.tile_rows_per_process[:pr0]) if pr0 != diag_process else column
lower_row = sum(r_tiles.tile_rows_per_process[:pr1]) if pr1 != diag_process else column
upper_inds = r_tiles.get_start_stop(key=(upper_row, column))
lower_inds = r_tiles.get_start_stop(key=(lower_row, column))
upper_size = (upper_inds[1] - upper_inds[0], upper_inds[3] - upper_inds[2])
lower_size = (lower_inds[1] - lower_inds[0], lower_inds[3] - lower_inds[2])
a_torch_device = r_tiles.arr._DNDarray__array.device
# upper adjustments
if upper_size[0] < upper_size[1] and r_tiles.tile_rows_per_process[pr0] > 1:
# end of dim0 (upper_inds[1]) is equal to the size in dim1
upper_inds = list(upper_inds)
upper_inds[1] = upper_inds[0] + upper_size[1]
upper_size = (upper_inds[1] - upper_inds[0], upper_inds[3] - upper_inds[2])
if lower_size[0] < lower_size[1] and r_tiles.tile_rows_per_process[pr1] > 1:
        # end of dim0 (lower_inds[1]) is equal to the size in dim1
lower_inds = list(lower_inds)
lower_inds[1] = lower_inds[0] + lower_size[1]
lower_size = (lower_inds[1] - lower_inds[0], lower_inds[3] - lower_inds[2])
if rank == pr0:
# need to use lloc on r_tiles.arr with the indices
upper = r_tiles.arr.lloc[upper_inds[0] : upper_inds[1], upper_inds[2] : upper_inds[3]]
comm.Send(upper.clone(), dest=pr1, tag=986)
lower = torch.zeros(lower_size, dtype=r_tiles.arr.dtype.torch_type(), device=a_torch_device)
comm.Recv(lower, source=pr1, tag=4363)
else: # rank == pr1:
lower = r_tiles.arr.lloc[lower_inds[0] : lower_inds[1], lower_inds[2] : lower_inds[3]]
upper = torch.zeros(upper_size, dtype=r_tiles.arr.dtype.torch_type(), device=a_torch_device)
comm.Recv(upper, source=pr0, tag=986)
comm.Send(lower.clone(), dest=pr0, tag=4363)
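    # stack the upper and lower tiles and take a full QR of the merged block; the R factor is
    # split back into the two tiles below, while q_merge is kept for the later Q calculation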
q_merge, r = torch.cat((upper, lower), dim=0).qr(some=False)
upp = r[: upper.shape[0]]
low = r[upper.shape[0] :]
if rank == pr0:
r_tiles.arr.lloc[upper_inds[0] : upper_inds[1], upper_inds[2] : upper_inds[3]] = upp
else: # rank == pr1:
r_tiles.arr.lloc[lower_inds[0] : lower_inds[1], lower_inds[2] : lower_inds[3]] = low
if column < r_tiles.tile_columns - 1:
upper_rest_size = (upper_size[0], r_tiles.arr.gshape[1] - upper_inds[3])
lower_rest_size = (lower_size[0], r_tiles.arr.gshape[1] - lower_inds[3])
if rank == pr0:
upper_rest = r_tiles.arr.lloc[upper_inds[0] : upper_inds[1], upper_inds[3] :]
lower_rest = torch.zeros(
lower_rest_size, dtype=r_tiles.arr.dtype.torch_type(), device=a_torch_device
)
comm.Send(upper_rest.clone(), dest=pr1, tag=98654)
comm.Recv(lower_rest, source=pr1, tag=436364)
else: # rank == pr1:
lower_rest = r_tiles.arr.lloc[lower_inds[0] : lower_inds[1], lower_inds[3] :]
upper_rest = torch.zeros(
upper_rest_size, dtype=r_tiles.arr.dtype.torch_type(), device=a_torch_device
)
comm.Recv(upper_rest, source=pr0, tag=98654)
comm.Send(lower_rest.clone(), dest=pr0, tag=436364)
cat_tensor = torch.cat((upper_rest, lower_rest), dim=0)
new_rest = torch.matmul(q_merge.t(), cat_tensor)
# the data for upper rest is a slice of the new_rest, need to slice only the 0th dim
upp = new_rest[: upper_rest.shape[0]]
low = new_rest[upper_rest.shape[0] :]
if rank == pr0:
r_tiles.arr.lloc[upper_inds[0] : upper_inds[1], upper_inds[3] :] = upp
# set the lower rest
else: # rank == pr1:
r_tiles.arr.lloc[lower_inds[0] : lower_inds[1], lower_inds[3] :] = low
q_dict[column][key] = [q_merge, upper.shape, lower.shape]
def __split0_send_q_to_diag_pr(
col, pr0, pr1, diag_process, comm, q_dict, key, q_dict_waits, q_dtype, q_device
):
"""
    This function sends the merged Q to the diagonal process. A buffered send is used for
    sending Q. This is needed for the Q calculation when two processes are merged and neither
    is the diagonal process.
Parameters
----------
col : int
The current column used in the parent QR loop
pr0, pr1 : int, int
Rank of processes 0 and 1. These are the processes used in the calculation of q
diag_process : int
The rank of the process which has the tile along the diagonal for the given column
comm : MPICommunication (ht.DNDarray.comm)
The communicator used. (Intended as the communication of the DNDarray 'a' given to qr)
q_dict : Dict
dictionary containing the Q values calculated for finding R
key : string
key for q_dict[col] which corresponds to the Q to send
q_dict_waits : Dict
Dictionary used in the collection of the Qs which are sent to the diagonal process
q_dtype : torch.type
Type of the Q tensor
q_device : torch.Device
Device of the Q tensor
Returns
-------
    None, sets the values of q_dict_waits with the *waits* for the values of Q, upper.shape,
and lower.shape
"""
if comm.rank not in [pr0, pr1, diag_process]:
return
# this is to send the merged q to the diagonal process for the forming of q
base_tag = "1" + str(pr1.item() if isinstance(pr1, torch.Tensor) else pr1)
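    # the tags below append "1", "12", "123" and "1234" to base_tag so that the shape of Q,
    # the Q tensor itself, and the upper/lower shapes each travel on a distinct tag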
if comm.rank == pr1:
q = q_dict[col][key][0]
u_shape = q_dict[col][key][1]
l_shape = q_dict[col][key][2]
comm.send(tuple(q.shape), dest=diag_process, tag=int(base_tag + "1"))
comm.Isend(q, dest=diag_process, tag=int(base_tag + "12"))
comm.send(u_shape, dest=diag_process, tag=int(base_tag + "123"))
comm.send(l_shape, dest=diag_process, tag=int(base_tag + "1234"))
if comm.rank == diag_process:
        # q_dict_waits[col][k] collects [[q_recv, q_wait], u_shape wait, l_shape wait, loop index (key[0])]
q_sh = comm.recv(source=pr1, tag=int(base_tag + "1"))
q_recv = torch.zeros(q_sh, dtype=q_dtype, device=q_device)
k = "p0" + str(pr0) + "p1" + str(pr1)
q_dict_waits[col][k] = []
q_wait = comm.Irecv(q_recv, source=pr1, tag=int(base_tag + "12"))
q_dict_waits[col][k].append([q_recv, q_wait])
q_dict_waits[col][k].append(comm.irecv(source=pr1, tag=int(base_tag + "123")))
q_dict_waits[col][k].append(comm.irecv(source=pr1, tag=int(base_tag + "1234")))
q_dict_waits[col][k].append(key[0])
def __split0_q_loop(col, r_tiles, proc_tile_start, active_procs, q0_tiles, q_dict, q_dict_waits):
"""
Function for Calculating Q for split=0 for QR. col is the index of the tile column. The
assumption here is that the diagonal tile is (col, col).
Parameters
----------
    col : int
        current column for which to calculate Q
    r_tiles : tiling.SquareDiagTiles
        tiling object for the R matrix
    proc_tile_start : torch.Tensor
        Tensor containing the row tile start indices for each process
    active_procs : torch.Tensor
        Tensor containing the ranks of the processes which have data
    q0_tiles : tiling.SquareDiagTiles
        tiling object for the Q0 matrix
q_dict : Dictionary
Dictionary created in the split=0 R calculation containing all of the Q matrices found
transforming the matrix to upper triangular for each column. The keys of this dictionary are
the column indices
q_dict_waits : Dictionary
Dictionary created while sending the Q matrices to the diagonal process
Returns
-------
None
"""
tile_columns = r_tiles.tile_columns
diag_process = (
torch.nonzero(input=proc_tile_start > col, as_tuple=False)[0]
if col != tile_columns
else proc_tile_start[-1]
)
diag_process = diag_process.item()
rank = r_tiles.arr.comm.rank
q0_dtype = q0_tiles.arr.dtype
q0_torch_type = q0_dtype.torch_type()
q0_torch_device = q0_tiles.arr.device.torch_device
# wait for Q tensors sent during the R calculation -----------------------------------------
if col in q_dict_waits.keys():
for key in q_dict_waits[col].keys():
new_key = q_dict_waits[col][key][3] + key + "e"
q_dict_waits[col][key][0][1].wait()
q_dict[col][new_key] = [
q_dict_waits[col][key][0][0],
q_dict_waits[col][key][1].wait(),
q_dict_waits[col][key][2].wait(),
]
del q_dict_waits[col]
# local Q calculation =====================================================================
if col in q_dict.keys():
lcl_col_shape = r_tiles.local_get(key=(slice(None), col)).shape
# get the start and stop of all local tiles
# -> get the rows_per_process[rank] and the row_indices
row_ind = r_tiles.row_indices
prev_rows_per_pr = sum(r_tiles.tile_rows_per_process[:rank])
rows_per_pr = r_tiles.tile_rows_per_process[rank]
if rows_per_pr == 1:
# if there is only one tile on the process: return q_dict[col]['0']
base_q = q_dict[col]["l0"][0].clone()
del q_dict[col]["l0"]
else:
# 0. get the offset of the column start
offset = (
torch.tensor(
row_ind[col].item() - row_ind[prev_rows_per_pr].item(), device=q0_torch_device
)
if row_ind[col].item() > row_ind[prev_rows_per_pr].item()
else torch.tensor(0, device=q0_torch_device)
)
# 1: create an eye matrix of the row's zero'th dim^2
            q_lcl = q_dict[col]["l0"]  # [0] -> q, [1] -> shape of the tile used in the q calc (q is square)
del q_dict[col]["l0"]
base_q = torch.eye(
lcl_col_shape[r_tiles.arr.split], dtype=q_lcl[0].dtype, device=q0_torch_device
)
# 2: set the area of the eye as Q
base_q[offset : offset + q_lcl[1][0], offset : offset + q_lcl[1][0]] = q_lcl[0]
local_merge_q = {rank: [base_q, None]}
else:
local_merge_q = {}
# -------------- send local Q to all -------------------------------------------------------
for pr in range(diag_process, active_procs[-1] + 1):
if pr != rank:
hld = torch.zeros(
[q0_tiles.lshape_map[pr][q0_tiles.arr.split]] * 2,
dtype=q0_torch_type,
device=q0_torch_device,
)
else:
hld = local_merge_q[pr][0].clone()
wait = q0_tiles.arr.comm.Ibcast(hld, root=pr)
local_merge_q[pr] = [hld, wait]
# recv local Q + apply local Q to Q0
for pr in range(diag_process, active_procs[-1] + 1):
if local_merge_q[pr][1] is not None:
# receive q from the other processes
local_merge_q[pr][1].wait()
if rank in active_procs:
sum_row = sum(q0_tiles.tile_rows_per_process[:pr])
end_row = q0_tiles.tile_rows_per_process[pr] + sum_row
# slice of q_tiles -> [0: -> end local, 1: start -> stop]
q_rest_loc = q0_tiles.local_get(key=(slice(None), slice(sum_row, end_row)))
            # apply the local merge to q0 then update q0
q_rest_loc = q_rest_loc @ local_merge_q[pr][0]
q0_tiles.local_set(key=(slice(None), slice(sum_row, end_row)), value=q_rest_loc)
del local_merge_q[pr]
# global Q calculation =====================================================================
# split up the Q's from the global QR calculation and set them in a dict w/ proper keys
global_merge_dict = (
__split0_global_q_dict_set(
q_dict_col=q_dict[col], col=col, r_tiles=r_tiles, q_tiles=q0_tiles
)
if rank == diag_process
else {}
)
if rank == diag_process:
merge_dict_keys = set(global_merge_dict.keys())
else:
merge_dict_keys = None
merge_dict_keys = r_tiles.arr.comm.bcast(merge_dict_keys, root=diag_process)
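    # every rank needs the full key set so that it can take part in the Ibcast of each merged Q
    # issued in the loop below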
# send the global merge dictionary to all processes
for k in merge_dict_keys:
if rank == diag_process:
snd = global_merge_dict[k].clone()
snd_shape = snd.shape
r_tiles.arr.comm.bcast(snd_shape, root=diag_process)
else:
snd_shape = None
snd_shape = r_tiles.arr.comm.bcast(snd_shape, root=diag_process)
snd = torch.empty(snd_shape, dtype=q0_dtype.torch_type(), device=q0_torch_device)
wait = r_tiles.arr.comm.Ibcast(snd, root=diag_process)
global_merge_dict[k] = [snd, wait]
if rank in active_procs:
# create a dictionary which says what tiles are in each column of the global merge Q
qi_mult = {}
for c in range(q0_tiles.tile_columns):
            # this loop slices the merge_dict keys along each column and builds up qi_mult
qi_mult_set = set([(i, c) for i in range(col, q0_tiles.tile_columns)])
if len(qi_mult_set & merge_dict_keys) != 0:
qi_mult[c] = list(qi_mult_set & merge_dict_keys)
# have all the q_merge in one place, now just do the mm with q0
# get all the keys which are in a column (qi_mult[column])
row_inds = q0_tiles.row_indices + [q0_tiles.arr.gshape[0]]
q_copy = q0_tiles.arr._DNDarray__array.clone()
for qi_col in qi_mult.keys():
# multiply q0 rows with qi cols
# the result of this will take the place of the row height and the column width
out_sz = q0_tiles.local_get(key=(slice(None), qi_col)).shape
mult_qi_col = torch.zeros(
(q_copy.shape[1], out_sz[1]), dtype=q0_dtype.torch_type(), device=q0_torch_device
)
for ind in qi_mult[qi_col]:
if global_merge_dict[ind][1] is not None:
global_merge_dict[ind][1].wait()
lp_q = global_merge_dict[ind][0]
if mult_qi_col.shape[1] < lp_q.shape[1]:
new_mult = torch.zeros(
(mult_qi_col.shape[0], lp_q.shape[1]),
dtype=mult_qi_col.dtype,
device=q0_torch_device,
)
new_mult[:, : mult_qi_col.shape[1]] += mult_qi_col.clone()
mult_qi_col = new_mult
mult_qi_col[
row_inds[ind[0]] : row_inds[ind[0]] + lp_q.shape[0], : lp_q.shape[1]
] = lp_q
hold = torch.matmul(q_copy, mult_qi_col)
write_inds = q0_tiles.get_start_stop(key=(0, qi_col))
q0_tiles.arr.lloc[:, write_inds[2] : write_inds[2] + hold.shape[1]] = hold
else:
for ind in merge_dict_keys:
global_merge_dict[ind][1].wait()
if col in q_dict.keys():
del q_dict[col]
def __split1_qr_loop(dcol, r_tiles, q0_tiles, calc_q):
"""
Helper function to do the QR factorization of the column 'dcol'. This function assumes that the
    target tile is at (dcol, dcol). This is the standard case as it assumes that the diagonal tile
holds the diagonal entries of the matrix.
Parameters
----------
dcol : int
column of the diagonal process
r_tiles : tiling.SquareDiagTiles
input matrix tiles to QR,
if copy is true in QR then it is a copy of the data, else it is the same as the input
q0_tiles : tiling.SquareDiagTiles
the Q matrix tiles as created in the QR function.
calc_q : Boolean
        Flag for whether to calculate Q or not; if False, then Q=None
Returns
-------
None
"""
r_torch_device = r_tiles.arr._DNDarray__array.device
q0_torch_device = q0_tiles.arr._DNDarray__array.device if calc_q else None
# ==================================== R Calculation - single tile =========================
# loop over each column, need to do the QR for each tile in the column(should be rows)
# need to get the diagonal process
rank = r_tiles.arr.comm.rank
cols_on_proc = torch.cumsum(
torch.tensor(r_tiles.tile_columns_per_process, device=r_torch_device), dim=0
)
not_completed_processes = torch.nonzero(input=dcol < cols_on_proc, as_tuple=False).flatten()
diag_process = not_completed_processes[0].item()
tile_rows = r_tiles.tile_rows
# get the diagonal tile and do qr on it
# send q to the other processes
# 1st qr: only on diagonal tile + apply to the row
if rank == diag_process:
# do qr on diagonal process
q1, r1 = r_tiles[dcol, dcol].qr(some=False)
r_tiles.arr.comm.Bcast(q1.clone(), root=diag_process)
r_tiles[dcol, dcol] = r1
# apply q1 to the trailing matrix (other processes)
# need to convert dcol to a local index
loc_col = dcol - sum(r_tiles.tile_columns_per_process[:rank])
hold = r_tiles.local_get(key=(dcol, slice(loc_col + 1, None)))
if hold is not None: # if there is more data on that row after the diagonal tile
r_tiles.local_set(key=(dcol, slice(loc_col + 1, None)), value=torch.matmul(q1.T, hold))
elif rank > diag_process:
# recv the Q from the diagonal process, and apply it to the trailing matrix
st_sp = r_tiles.get_start_stop(key=(dcol, dcol))
sz = st_sp[1] - st_sp[0], st_sp[3] - st_sp[2]
q1 = torch.zeros(
(sz[0], sz[0]), dtype=r_tiles.arr.dtype.torch_type(), device=r_torch_device
)
loc_col = 0
r_tiles.arr.comm.Bcast(q1, root=diag_process)
hold = r_tiles.local_get(key=(dcol, slice(0, None)))
r_tiles.local_set(key=(dcol, slice(0, None)), value=torch.matmul(q1.T, hold))
else:
# these processes are already done calculating R, only need to calc Q, need to recv q1
st_sp = r_tiles.get_start_stop(key=(dcol, dcol))
sz = st_sp[1] - st_sp[0], st_sp[3] - st_sp[2]
q1 = torch.zeros(
(sz[0], sz[0]), dtype=r_tiles.arr.dtype.torch_type(), device=r_torch_device
)
r_tiles.arr.comm.Bcast(q1, root=diag_process)
# ================================ Q Calculation - single tile =============================
if calc_q:
for row in range(q0_tiles.tile_rows_per_process[rank]):
# q1 is applied to each tile of the column dcol of q0 then written there
q0_tiles.local_set(
key=(row, dcol), value=torch.matmul(q0_tiles.local_get(key=(row, dcol)), q1)
)
del q1
# loop over the rest of the rows, combine the tiles, then apply the result to the rest
# 2nd step: merged QR on the rows
# ================================ R Calculation - merged tiles ============================
diag_tile = r_tiles[dcol, dcol]
# st_sp = r_tiles.get_start_stop(key=(dcol, dcol))
diag_st_sp = r_tiles.get_start_stop(key=(dcol, dcol))
diag_sz = diag_st_sp[1] - diag_st_sp[0], diag_st_sp[3] - diag_st_sp[2]
    # (Q) need to get the start/stop of the diagonal tile
for row in range(dcol + 1, tile_rows):
lp_st_sp = r_tiles.get_start_stop(key=(row, dcol))
lp_sz = lp_st_sp[1] - lp_st_sp[0], lp_st_sp[3] - lp_st_sp[2]
if rank == diag_process:
# cat diag tile and loop tile
loop_tile = r_tiles[row, dcol]
loop_cat = torch.cat((diag_tile, loop_tile), dim=0)
# qr
ql, rl = loop_cat.qr(some=False)
# send ql to all
r_tiles.arr.comm.Bcast(ql.clone().contiguous(), root=diag_process)
# set rs
r_tiles[dcol, dcol] = rl[: diag_sz[0]]
r_tiles[row, dcol] = rl[diag_sz[0] :]
# apply q to rest
if loc_col + 1 < r_tiles.tile_columns_per_process[rank]:
upp = r_tiles.local_get(key=(dcol, slice(loc_col + 1, None)))
low = r_tiles.local_get(key=(row, slice(loc_col + 1, None)))
hold = torch.matmul(ql.T, torch.cat((upp, low), dim=0))
# set upper
r_tiles.local_set(key=(dcol, slice(loc_col + 1, None)), value=hold[: diag_sz[0]])
# set lower
r_tiles.local_set(key=(row, slice(loc_col + 1, None)), value=hold[diag_sz[0] :])
elif rank > diag_process:
ql = torch.zeros(
[lp_sz[0] + diag_sz[0]] * 2,
dtype=r_tiles.arr.dtype.torch_type(),
device=r_torch_device,
)
r_tiles.arr.comm.Bcast(ql, root=diag_process)
upp = r_tiles.local_get(key=(dcol, slice(0, None)))
low = r_tiles.local_get(key=(row, slice(0, None)))
hold = torch.matmul(ql.T, torch.cat((upp, low), dim=0))
# set upper
r_tiles.local_set(key=(dcol, slice(0, None)), value=hold[: diag_sz[0]])
# set lower
r_tiles.local_set(key=(row, slice(0, None)), value=hold[diag_sz[0] :])
else:
ql = torch.zeros(
[lp_sz[0] + diag_sz[0]] * 2,
dtype=r_tiles.arr.dtype.torch_type(),
device=r_torch_device,
)
r_tiles.arr.comm.Bcast(ql, root=diag_process)
# ================================ Q Calculation - merged tiles ========================
if calc_q:
top_left = ql[: diag_sz[0], : diag_sz[0]]
top_right = ql[: diag_sz[0], diag_sz[0] :]
bottom_left = ql[diag_sz[0] :, : diag_sz[0]]
bottom_right = ql[diag_sz[0] :, diag_sz[0] :]
# two multiplications: one for the left tiles and one for the right
# left tiles --------------------------------------------------------------------
# create r column of the same size as the tile row of q0
st_sp = r_tiles.get_start_stop(key=(slice(dcol, None), dcol))
qloop_col_left_sz = st_sp[1] - st_sp[0], st_sp[3] - st_sp[2]
qloop_col_left = torch.zeros(
qloop_col_left_sz, dtype=q0_tiles.arr.dtype.torch_type(), device=q0_torch_device
)
            # top left starts at 0 and goes until diag_sz[0]
            qloop_col_left[: diag_sz[0]] = top_left
            # bottom left starts at the loop-row tile's offset and spans its height (only care about 0th dim)
st, sp, _, _ = r_tiles.get_start_stop(key=(row, 0))
st -= diag_st_sp[0] # adjust these by subtracting the start index of the diag tile
sp -= diag_st_sp[0]
qloop_col_left[st:sp] = bottom_left
# right tiles --------------------------------------------------------------------
# create r columns tensor of the size of the tile column of index 'row'
st_sp = q0_tiles.get_start_stop(key=(row, slice(dcol, None)))
sz = st_sp[1] - st_sp[0], st_sp[3] - st_sp[2]
qloop_col_right = torch.zeros(
sz[1], sz[0], dtype=q0_tiles.arr.dtype.torch_type(), device=q0_torch_device
)
            # top right starts at 0 and goes until diag_sz[0]
            qloop_col_right[: diag_sz[0]] = top_right
            # bottom right starts at the loop-row tile's offset and spans its height (only care about 0th dim)
st, sp, _, _ = r_tiles.get_start_stop(key=(row, 0))
st -= diag_st_sp[0] # adjust these by subtracting the start index of the diag tile
sp -= diag_st_sp[0]
qloop_col_right[st:sp] = bottom_right
for qrow in range(q0_tiles.tile_rows_per_process[rank]):
# q1 is applied to each tile of the column dcol of q0 then written there
q0_row = q0_tiles.local_get(key=(qrow, slice(dcol, None))).clone()
q0_tiles.local_set(key=(qrow, dcol), value=torch.matmul(q0_row, qloop_col_left))
q0_tiles.local_set(key=(qrow, row), value=torch.matmul(q0_row, qloop_col_right))
del ql
|
the-stack_106_23935 | """
Utilities Tests
---------------
UnitTests for the utilities module
"""
import unittest
import tempfile
import os
from damn_at import utilities as utils
class UtilTests(unittest.TestCase):
def test_is_existing_file_a(self):
"""Test returns false when given a bad path"""
ret = utils.is_existing_file('/foo/monkey/slug/shit')
self.assertFalse(ret)
def test_is_existing_file_b(self):
"""Test returns true when given a valid path"""
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
ret = utils.is_existing_file(f.name)
self.assertTrue(ret)
def test_calculate_hash(self):
"""Test accurate hash generated from file"""
data = (
b'QlJBTlQgSVMgU1VQRVIgQVdFU09NRSBDT09MIEFORCBTSElULiBTTFVHUyBBUkUg'
b'Q1VURQ=='
)
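        # the expected digest is a 40-character hex string (SHA-1-sized); this test assumes
        # calculate_hash_for_file hashes the raw file contents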
expected_hash = '03439afa9d61f99f35d936b01ea8b4982ab247a0'
t = tempfile.NamedTemporaryFile(delete=False)
t.write(data)
t.close()
ret = utils.calculate_hash_for_file(t.name)
self.assertEqual(ret, expected_hash)
def test_unique_asset_id_reference_from_fields(self):
"""Make sure uuid generator produces accurate strings"""
ret = utils.unique_asset_id_reference_from_fields(
'OHIAMAHASH',
'SNAILJUICE',
'text/rtf'
)
self.assertEqual(ret, 'OHIAMAHASHSNAILJUICEtext__rtf')
def test_suite():
"""Return a list of tests"""
return unittest.TestLoader().loadTestsFromTestCase(UtilTests)
if __name__ == '__main__':
# unittest.main()
unittest.TextTestRunner().run(test_suite())
|
the-stack_106_23937 | ### LIBRARIES ###
# Global libraries
import os
import sys
import argparse
import logging
import pdb
from tqdm import tqdm, trange
import json
from io import open
import math
import random
from time import gmtime, strftime
from timeit import default_timer as timer
import numpy as np
from tensorboardX import SummaryWriter
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
# Custom libraries
from vilbert.datasets import ConceptCapLoaderTrain, ConceptCapLoaderVal
from multimodal_pretraining import BertForMultiModalPreTraining
from bert_config import BertConfig
### LOGGER CONFIGURATION ###
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
### MAIN FUNCTION ###
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--train_file",
default="data/conceptual_caption/training",
type=str,
# required=True,
help="The input train corpus.",
)
parser.add_argument(
"--validation_file",
default="data/conceptual_caption/validation",
type=str,
# required=True,
help="The input train corpus.",
)
parser.add_argument(
"--from_pretrained",
default="",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--output_dir",
default="save",
type=str,
# required=True,
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--config_file",
default="config/bert_config.json",
type=str,
# required=True,
help="The config file which specified the model details.",
)
## Other parameters
parser.add_argument(
"--max_seq_length",
default=36,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument("--predict_feature", action="store_true", help="visual target.")
parser.add_argument(
"--train_batch_size",
default=512,
type=int,
help="Total batch size for training.",
)
parser.add_argument(
"--learning_rate",
default=1e-4,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=10.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--start_epoch",
default=0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.",
)
parser.add_argument(
"--img_weight", default=1, type=float, help="weight for image loss"
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--on_memory",
action="store_true",
help="Whether to load train samples into memory or use disk",
)
parser.add_argument(
"--do_lower_case",
type=bool,
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit float precision instead of 32-bit",
)
parser.add_argument(
"--loss_scale",
type=float,
default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n",
)
parser.add_argument(
"--num_workers",
type=int,
default=3,
help="Number of workers in the dataloader.",
)
parser.add_argument(
"--save_name", default="", type=str, help="save name for training.",
)
parser.add_argument(
"--baseline",
action="store_true",
help="Wheter to use the baseline model (single bert).",
)
parser.add_argument(
"--freeze",
default=-1,
type=int,
help="till which layer of textual stream of vilbert need to fixed.",
)
parser.add_argument(
"--use_chuncks",
default=0,
type=float,
help="whether use chunck for parallel training.",
)
parser.add_argument(
"--distributed",
action="store_true",
help="whether use chunck for parallel training.",
)
parser.add_argument(
"--without_coattention", action="store_true", help="whether pair loss."
)
args = parser.parse_args()
if args.baseline:
from pytorch_pretrained_bert.modeling import BertConfig
from vilbert.basebert import BertForMultiModalPreTraining
else:
from vilbert.vilbert import BertForMultiModalPreTraining, BertConfig
print(args)
    if args.save_name != "":
timeStamp = args.save_name
else:
timeStamp = strftime("%d-%b-%y-%X-%a", gmtime())
        timeStamp += "_{:0>6d}".format(random.randint(0, int(10e6)))
savePath = os.path.join(args.output_dir, timeStamp)
if not os.path.exists(savePath):
os.makedirs(savePath)
config = BertConfig.from_json_file(args.config_file)
if args.freeze > config.t_biattention_id[0]:
config.fixed_t_layer = config.t_biattention_id[0]
if args.without_coattention:
config.with_coattention = False
# save all the hidden parameters.
with open(os.path.join(savePath, "command.txt"), "w") as f:
print(args, file=f) # Python 3.x
print("\n", file=f)
print(config, file=f)
bert_weight_name = json.load(
open("config/" + args.from_pretrained + "_weight_name.json", "r")
)
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
logger.info(
"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16
)
)
if args.gradient_accumulation_steps < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps
)
)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case
)
num_train_optimization_steps = None
viz = TBlogger("logs", timeStamp)
train_dataset = ConceptCapLoaderTrain(
args.train_file,
tokenizer,
seq_len=args.max_seq_length,
batch_size=args.train_batch_size,
predict_feature=args.predict_feature,
num_workers=args.num_workers,
distributed=args.distributed,
)
validation_dataset = ConceptCapLoaderVal(
args.validation_file,
tokenizer,
seq_len=args.max_seq_length,
batch_size=args.train_batch_size,
predict_feature=args.predict_feature,
num_workers=2,
distributed=args.distributed,
)
num_train_optimization_steps = int(
train_dataset.num_dataset
/ args.train_batch_size
/ args.gradient_accumulation_steps
) * (args.num_train_epochs - args.start_epoch)
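    # total optimizer steps = optimizer updates per epoch (batches divided by the gradient
    # accumulation factor) multiplied by the number of epochs still to run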
# if args.local_rank != -1:
# num_train_optimization_steps = (
# num_train_optimization_steps // torch.distributed.get_world_size()
# )
default_gpu = False
if dist.is_available() and args.distributed:
rank = dist.get_rank()
if rank == 0:
default_gpu = True
else:
default_gpu = True
# pdb.set_trace()
if args.predict_feature:
config.v_target_size = 2048
config.predict_feature = True
else:
config.v_target_size = 1601
config.predict_feature = False
if args.from_pretrained:
model = BertForMultiModalPreTraining.from_pretrained(
args.from_pretrained, config
)
else:
model = BertForMultiModalPreTraining(config)
model.cuda()
if args.fp16:
model.half()
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
)
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
if args.freeze != -1:
bert_weight_name_filtered = []
for name in bert_weight_name:
if "embeddings" in name:
bert_weight_name_filtered.append(name)
elif "encoder" in name:
layer_num = name.split(".")[2]
if int(layer_num) <= args.freeze:
bert_weight_name_filtered.append(name)
optimizer_grouped_parameters = []
for key, value in dict(model.named_parameters()).items():
if key[12:] in bert_weight_name_filtered:
value.requires_grad = False
if default_gpu:
print("filtered weight")
print(bert_weight_name_filtered)
if not args.from_pretrained:
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
else:
optimizer_grouped_parameters = []
for key, value in dict(model.named_parameters()).items():
if value.requires_grad:
if key[12:] in bert_weight_name:
lr = args.learning_rate * 0.1
else:
lr = args.learning_rate
if any(nd in key for nd in no_decay):
optimizer_grouped_parameters += [
{"params": [value], "lr": lr, "weight_decay": 0.01}
]
if not any(nd in key for nd in no_decay):
optimizer_grouped_parameters += [
{"params": [value], "lr": lr, "weight_decay": 0.0}
]
if default_gpu:
print(
len(list(model.named_parameters())), len(optimizer_grouped_parameters)
)
    # set different parameters for vision branch and language branch.
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
)
optimizer = FusedAdam(
optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0,
)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
if args.from_pretrained:
optimizer = BertAdam(
optimizer_grouped_parameters,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps,
)
else:
optimizer = BertAdam(
optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps,
)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", train_dataset.num_dataset)
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
startIterID = 0
global_step = 0
masked_loss_v_tmp = 0
masked_loss_t_tmp = 0
next_sentence_loss_tmp = 0
loss_tmp = 0
start_t = timer()
# t1 = timer()
for epochId in range(int(args.start_epoch), int(args.num_train_epochs)):
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
# iter_dataloader = iter(train_dataloader)
for step, batch in enumerate(train_dataset):
iterId = startIterID + step + (epochId * len(train_dataset))
# batch = iter_dataloader.next()
batch = tuple(t.cuda(device=device, non_blocking=True) for t in batch)
(
input_ids,
input_mask,
segment_ids,
lm_label_ids,
is_next,
image_feat,
image_loc,
image_target,
image_label,
image_mask,
image_ids,
) = batch
masked_loss_t, masked_loss_v, next_sentence_loss = model(
input_ids,
image_feat,
image_loc,
segment_ids,
input_mask,
image_mask,
lm_label_ids,
image_label,
image_target,
is_next,
)
if args.without_coattention:
next_sentence_loss = next_sentence_loss * 0
masked_loss_v = masked_loss_v * args.img_weight
loss = masked_loss_t + masked_loss_v + next_sentence_loss
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
masked_loss_t = masked_loss_t.mean()
masked_loss_v = masked_loss_v.mean()
next_sentence_loss = next_sentence_loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if math.isnan(loss.item()):
pdb.set_trace()
tr_loss += loss.item()
rank = 0
if dist.is_available() and args.distributed:
rank = dist.get_rank()
else:
rank = 0
viz.linePlot(iterId, loss.item(), "loss_" + str(rank), "train")
viz.linePlot(
iterId, masked_loss_t.item(), "masked_loss_t_" + str(rank), "train"
)
viz.linePlot(
iterId, masked_loss_v.item(), "masked_loss_v_" + str(rank), "train"
)
viz.linePlot(
iterId,
next_sentence_loss.item(),
"next_sentence_loss_" + str(rank),
"train",
)
# viz.linePlot(iterId, optimizer.get_lr()[0], 'learning_rate', 'train')
loss_tmp += loss.item()
masked_loss_v_tmp += masked_loss_v.item()
masked_loss_t_tmp += masked_loss_t.item()
next_sentence_loss_tmp += next_sentence_loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear(
global_step / num_train_optimization_steps,
args.warmup_proportion,
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if step % 20 == 0 and step != 0:
masked_loss_t_tmp = masked_loss_t_tmp / 20.0
masked_loss_v_tmp = masked_loss_v_tmp / 20.0
next_sentence_loss_tmp = next_sentence_loss_tmp / 20.0
loss_tmp = loss_tmp / 20.0
end_t = timer()
timeStamp = strftime("%a %d %b %y %X", gmtime())
Ep = epochId + nb_tr_steps / float(len(train_dataset))
printFormat = "[%s][Ep: %.2f][Iter: %d][Time: %5.2fs][Loss: %.5g][Loss_v: %.5g][Loss_t: %.5g][Loss_n: %.5g][LR: %.8g]"
printInfo = [
timeStamp,
Ep,
nb_tr_steps,
end_t - start_t,
loss_tmp,
masked_loss_v_tmp,
masked_loss_t_tmp,
next_sentence_loss_tmp,
optimizer.get_lr()[0],
]
start_t = end_t
print(printFormat % tuple(printInfo))
masked_loss_v_tmp = 0
masked_loss_t_tmp = 0
next_sentence_loss_tmp = 0
loss_tmp = 0
# Do the evaluation
torch.set_grad_enabled(False)
start_t = timer()
numBatches = len(validation_dataset)
eval_masked_loss_t = 0
eval_masked_loss_v = 0
eval_next_sentence_loss = 0
eval_total_loss = 0
model.eval()
for step, batch in enumerate(validation_dataset):
batch = tuple(t.cuda(device=device, non_blocking=True) for t in batch)
(
input_ids,
input_mask,
segment_ids,
lm_label_ids,
is_next,
image_feat,
image_loc,
image_target,
image_label,
image_mask,
image_ids,
) = batch
masked_loss_t, masked_loss_v, next_sentence_loss = model(
input_ids,
image_feat,
image_loc,
segment_ids,
input_mask,
image_mask,
lm_label_ids,
image_label,
image_target,
is_next,
)
masked_loss_v = masked_loss_v * args.img_weight
loss = masked_loss_t + masked_loss_v + next_sentence_loss
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
masked_loss_t = masked_loss_t.mean()
masked_loss_v = masked_loss_v.mean()
next_sentence_loss = next_sentence_loss.mean()
eval_masked_loss_t += masked_loss_t.item()
eval_masked_loss_v += masked_loss_v.item()
eval_next_sentence_loss += next_sentence_loss.item()
eval_total_loss += loss.item()
end_t = timer()
delta_t = " Time: %5.2fs" % (end_t - start_t)
start_t = end_t
progressString = "\r Evaluating split '%s' [%d/%d]\t" + delta_t
sys.stdout.write(progressString % ("val", step + 1, numBatches))
sys.stdout.flush()
eval_masked_loss_t = eval_masked_loss_t / float(numBatches)
eval_masked_loss_v = eval_masked_loss_v / float(numBatches)
eval_next_sentence_loss = eval_next_sentence_loss / float(numBatches)
eval_total_loss = eval_total_loss / float(numBatches)
printFormat = (
"Evaluation: [Loss: %.5g][Loss_v: %.5g][Loss_t: %.5g][Loss_n: %.5g]"
)
printInfo = [
eval_total_loss,
eval_masked_loss_v,
eval_masked_loss_t,
eval_next_sentence_loss,
]
print(printFormat % tuple(printInfo))
torch.set_grad_enabled(True)
viz.linePlot(epochId, eval_total_loss, "loss_" + str(rank), "val")
viz.linePlot(epochId, eval_masked_loss_t, "masked_loss_t_" + str(rank), "val")
viz.linePlot(epochId, eval_masked_loss_v, "masked_loss_v_" + str(rank), "val")
viz.linePlot(
epochId, eval_next_sentence_loss, "next_sentence_loss_" + str(rank), "val"
)
if default_gpu:
# Save a trained model
logger.info("** ** * Saving fine - tuned model ** ** * ")
model_to_save = (
model.module if hasattr(model, "module") else model
) # Only save the model it-self
output_model_file = os.path.join(
savePath, "pytorch_model_" + str(epochId) + ".bin"
)
torch.save(model_to_save.state_dict(), output_model_file)
class TBlogger:
def __init__(self, log_dir, exp_name):
log_dir = log_dir + "/" + exp_name
print("logging file at: " + log_dir)
self.logger = SummaryWriter(log_dir=log_dir)
def linePlot(self, step, val, split, key, xlabel="None"):
self.logger.add_scalar(split + "/" + key, val, step)
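# Illustrative sketch (not part of the training pipeline): TBlogger can also be used standalone
# to record scalar curves, e.g.
#
#   viz = TBlogger("logs", "debug_run")
#   for step in range(10):
#       viz.linePlot(step, 1.0 / (step + 1), "loss_0", "train")
#
# which writes the values under the tag "loss_0/train" in the TensorBoard log directory.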
if __name__ == "__main__":
main()
|
the-stack_106_23938 |
import importlib
import threading
import logging
import json
import re
import ckanext.hdx_service_checker.checks as checks
import ckanext.hdx_service_checker.exceptions as exceptions
log = logging.getLogger(__name__)
LOCK = threading.RLock()
def run_checks(config_file_path, runtime_vars):
with open(config_file_path) as file_data:
config_list = json.load(file_data)
if config_list:
checker = Checker(config_list, runtime_vars)
return checker.run_checks()
else:
raise exceptions.ParamMissingException("Missing config for file path")
class Checker(object):
def __init__(self, config_list, runtime_vars):
self.checks = []
for config in config_list:
try:
self.__replace_runtime_vars(config, runtime_vars)
except exceptions.ParamMissingException as ex:
error_message = str(ex)
config = {
'name': config['name'],
'module_name': 'ckanext.hdx_service_checker.checks.checks',
'class_name': 'DummyCheck',
'error_message': error_message,
'description': 'The configured {} check could not be performed. '
'There was an error when replacing runtime vars.'.format(config['class_name']),
'result': 'Failed',
}
log.warning(error_message)
class_name = config['class_name']
module_name = config['module_name']
key = '{}:{}'.format(module_name, class_name)
user_agent = runtime_vars.get('HDX_USER_AGENT', 'SERVICE_CHECKER')
# 2 or more requests could hit this at the same time
with LOCK:
if not checks.get_check_implementation(key):
c_module = importlib.import_module(module_name)
clazz = getattr(c_module, class_name)
checks.add_check_implementation(key, clazz)
check_obj = checks.get_check_implementation(key)(config, user_agent)
self.checks.append(check_obj)
def __replace_runtime_vars(self, config, runtime_vars):
'''
Replaces runtime variables in the configuration. Config dict is modified.
:param config: configuration for 1 check
:type config: dict
:param runtime_vars: variables determined at application runtime
:type runtime_vars: dict
'''
def __replace_var(match):
var_name = match.group(1)
var_value = runtime_vars.get(var_name)
if not var_value:
raise exceptions.ParamMissingException('{} is not a runtime variable'.format(var_name))
else:
return str(var_value)
for key in config.keys():
val = config.get(key)
if val:
new_val = re.sub(r'\#\{([a-zA-Z0-9-_.]+)\}', __replace_var, str(val))
config[key] = new_val
def run_checks(self):
result_list = []
for check in self.checks:
log.info('Running check: {}'.format(str(check.config)))
result = check.run_check()
result_list.append(result)
log.info(str(result))
return result_list
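# Illustrative sketch of a single check entry as consumed by Checker. The class and field names
# used here ("UrlCheck", "url") are hypothetical; only "name", "module_name", "class_name" and
# the "#{VAR}" runtime-variable placeholders are taken from the code above.
#
#   {
#       "name": "solr_check",
#       "module_name": "ckanext.hdx_service_checker.checks.checks",
#       "class_name": "UrlCheck",
#       "url": "#{SOLR_URL}",
#       "description": "checks that the Solr endpoint responds"
#   }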
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p', handlers=[logging.StreamHandler()])
runtime_vars = {
'hdx.rest.indicator.endpoint': 'https://manage.humdata.org/public/api2/values',
'SOLR_URL': 'http://172.17.42.1:9011/solr/ckan/select?q=health&start=0&rows=1'
}
config_file = '/home/alex/PycharmProjects/hdx-ckan/ckanext-hdx_service_checker/ckanext/hdx_service_checker/config/config.json'
run_checks(config_file, runtime_vars)
|
the-stack_106_23939 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import json
import logging
import os
import re
import unittest
from argparse import ArgumentParser
from contextlib import redirect_stdout
from datetime import datetime
from unittest import mock
import pytest
from parameterized import parameterized
from airflow import DAG
from airflow.cli import cli_parser
from airflow.cli.commands import task_command
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagRunNotFound
from airflow.models import DagBag, DagRun, TaskInstance
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = datetime(2022, 1, 1)
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def reset(dag_id):
with create_session() as session:
tis = session.query(TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
runs = session.query(DagRun).filter_by(dag_id=dag_id)
runs.delete()
# TODO: Check if tests need side effects - locally there's a missing DAG
class TestCliTasks(unittest.TestCase):
run_id = 'TEST_RUN_ID'
dag_id = 'example_python_operator'
parser: ArgumentParser
dagbag: DagBag
dag: DAG
dag_run: DagRun
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
clear_db_runs()
cls.dag = cls.dagbag.get_dag(cls.dag_id)
cls.dag_run = cls.dag.create_dagrun(
state=State.NONE, run_id=cls.run_id, run_type=DagRunType.MANUAL, execution_date=DEFAULT_DATE
)
@classmethod
def tearDownClass(cls) -> None:
clear_db_runs()
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags:
args = self.parser.parse_args(['tasks', 'list', dag_id])
task_command.task_list(args)
args = self.parser.parse_args(['tasks', 'list', 'example_bash_operator', '--tree'])
task_command.task_list(args)
@pytest.mark.filterwarnings("ignore::airflow.utils.context.AirflowContextDeprecationWarning")
def test_test(self):
"""Test the `airflow test` command"""
args = self.parser.parse_args(
["tasks", "test", "example_python_operator", 'print_the_context', '2018-01-01']
)
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_test(args)
# Check that prints, and log messages, are shown
assert "'example_python_operator__print_the_context__20180101'" in stdout.getvalue()
@pytest.mark.filterwarnings("ignore::airflow.utils.context.AirflowContextDeprecationWarning")
def test_test_with_existing_dag_run(self):
"""Test the `airflow test` command"""
task_id = 'print_the_context'
args = self.parser.parse_args(["tasks", "test", self.dag_id, task_id, DEFAULT_DATE.isoformat()])
with self.assertLogs('airflow.models', level='INFO') as cm:
task_command.task_test(args)
assert any(
[
f"Marking task as SUCCESS. dag_id={self.dag_id}, task_id={task_id}" in log
for log in cm.output
]
)
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_run_with_existing_dag_run_id(self, mock_local_job):
"""
Test that we can run with existing dag_run_id
"""
task0_id = self.dag.task_ids[0]
args0 = [
'tasks',
'run',
'--ignore-all-dependencies',
'--local',
self.dag_id,
task0_id,
self.run_id,
]
task_command.task_run(self.parser.parse_args(args0), dag=self.dag)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
ignore_all_deps=True,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pickle_id=None,
pool=None,
external_executor_id=None,
)
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_run_raises_when_theres_no_dagrun(self, mock_local_job):
"""
Test that run raises when there's run_id but no dag_run
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag(dag_id)
task0_id = 'test_run_dependent_task'
run_id = 'TEST_RUN_ID'
args0 = [
'tasks',
'run',
'--ignore-all-dependencies',
'--local',
dag_id,
task0_id,
run_id,
]
with self.assertRaises(DagRunNotFound):
task_command.task_run(self.parser.parse_args(args0), dag=dag)
def test_cli_test_with_params(self):
task_command.task_test(
self.parser.parse_args(
[
'tasks',
'test',
'example_passing_params_via_test_command',
'run_this',
'--task-params',
'{"foo":"bar"}',
DEFAULT_DATE.isoformat(),
]
)
)
task_command.task_test(
self.parser.parse_args(
[
'tasks',
'test',
'example_passing_params_via_test_command',
'also_run_this',
'--task-params',
'{"foo":"bar"}',
DEFAULT_DATE.isoformat(),
]
)
)
def test_cli_test_with_env_vars(self):
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_test(
self.parser.parse_args(
[
'tasks',
'test',
'example_passing_params_via_test_command',
'env_var_test_task',
'--env-vars',
'{"foo":"bar"}',
DEFAULT_DATE.isoformat(),
]
)
)
output = stdout.getvalue()
assert 'foo=bar' in output
assert 'AIRFLOW_TEST_MODE=True' in output
@parameterized.expand(
[
("--ignore-all-dependencies",),
("--ignore-depends-on-past",),
("--ignore-dependencies",),
("--force",),
],
)
def test_cli_run_invalid_raw_option(self, option: str):
with pytest.raises(
AirflowException,
match="Option --raw does not work with some of the other options on this command.",
):
task_command.task_run(
self.parser.parse_args(
[ # type: ignore
'tasks',
'run',
'example_bash_operator',
'runme_0',
DEFAULT_DATE.isoformat(),
'--raw',
option,
]
)
)
def test_cli_run_mutually_exclusive(self):
with pytest.raises(AirflowException, match="Option --raw and --local are mutually exclusive."):
task_command.task_run(
self.parser.parse_args(
[
'tasks',
'run',
'example_bash_operator',
'runme_0',
DEFAULT_DATE.isoformat(),
'--raw',
'--local',
]
)
)
def test_task_render(self):
"""
tasks render should render and displays templated fields for a given task
"""
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_render(
self.parser.parse_args(['tasks', 'render', 'tutorial', 'templated', '2016-01-01'])
)
output = stdout.getvalue()
assert 'echo "2016-01-01"' in output
assert 'echo "2016-01-08"' in output
def test_cli_run_when_pickle_and_dag_cli_method_selected(self):
"""
tasks run should return an AirflowException when invalid pickle_id is passed
"""
pickle_id = 'pickle_id'
with pytest.raises(
AirflowException,
match=re.escape("You cannot use the --pickle option when using DAG.cli() method."),
):
task_command.task_run(
self.parser.parse_args(
[
'tasks',
'run',
'example_bash_operator',
'runme_0',
DEFAULT_DATE.isoformat(),
'--pickle',
pickle_id,
]
),
self.dag,
)
def test_task_state(self):
task_command.task_state(
self.parser.parse_args(
['tasks', 'state', self.dag_id, 'print_the_context', DEFAULT_DATE.isoformat()]
)
)
def test_task_states_for_dag_run(self):
dag2 = DagBag().dags['example_python_operator']
task2 = dag2.get_task(task_id='print_the_context')
default_date2 = timezone.make_aware(datetime(2016, 1, 9))
dag2.clear()
dagrun = dag2.create_dagrun(
state=State.RUNNING,
execution_date=default_date2,
run_type=DagRunType.MANUAL,
external_trigger=True,
)
ti2 = TaskInstance(task2, dagrun.execution_date)
ti2.set_state(State.SUCCESS)
ti_start = ti2.start_date
ti_end = ti2.end_date
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_states_for_dag_run(
self.parser.parse_args(
[
'tasks',
'states-for-dag-run',
'example_python_operator',
default_date2.isoformat(),
'--output',
"json",
]
)
)
actual_out = json.loads(stdout.getvalue())
assert len(actual_out) == 1
assert actual_out[0] == {
'dag_id': 'example_python_operator',
'execution_date': '2016-01-09T00:00:00+00:00',
'task_id': 'print_the_context',
'state': 'success',
'start_date': ti_start.isoformat(),
'end_date': ti_end.isoformat(),
}
def test_task_states_for_dag_run_when_dag_run_not_exists(self):
"""
task_states_for_dag_run should return an AirflowException when invalid dag id is passed
"""
with pytest.raises(DagRunNotFound):
default_date2 = timezone.make_aware(datetime(2016, 1, 9))
task_command.task_states_for_dag_run(
self.parser.parse_args(
[
'tasks',
'states-for-dag-run',
'not_exists_dag',
default_date2.isoformat(),
'--output',
"json",
]
)
)
def test_subdag_clear(self):
args = self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator', '--yes'])
task_command.task_clear(args)
args = self.parser.parse_args(
['tasks', 'clear', 'example_subdag_operator', '--yes', '--exclude-subdags']
)
task_command.task_clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator.section-1', '--yes'])
task_command.task_clear(args)
args = self.parser.parse_args(
['tasks', 'clear', 'example_subdag_operator.section-1', '--yes', '--exclude-parentdag']
)
task_command.task_clear(args)
class TestLogsfromTaskRunCommand(unittest.TestCase):
def setUp(self) -> None:
self.dag_id = "test_logging_dag"
self.task_id = "test_task"
self.run_id = "test_run"
self.dag_path = os.path.join(ROOT_FOLDER, "dags", "test_logging_in_dag.py")
reset(self.dag_id)
self.execution_date = timezone.make_aware(datetime(2017, 1, 1))
self.execution_date_str = self.execution_date.isoformat()
self.task_args = ['tasks', 'run', self.dag_id, self.task_id, '--local', self.execution_date_str]
self.log_dir = conf.get('logging', 'base_log_folder')
self.log_filename = f"dag_id={self.dag_id}/run_id={self.run_id}/task_id={self.task_id}/attempt=1.log"
self.ti_log_file_path = os.path.join(self.log_dir, self.log_filename)
self.parser = cli_parser.get_parser()
DagBag().get_dag(self.dag_id).create_dagrun(
run_id=self.run_id,
execution_date=self.execution_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
run_type=DagRunType.MANUAL,
)
root = self.root_logger = logging.getLogger()
self.root_handlers = root.handlers.copy()
self.root_filters = root.filters.copy()
self.root_level = root.level
try:
os.remove(self.ti_log_file_path)
except OSError:
pass
def tearDown(self) -> None:
root = self.root_logger
root.setLevel(self.root_level)
root.handlers[:] = self.root_handlers
root.filters[:] = self.root_filters
reset(self.dag_id)
try:
os.remove(self.ti_log_file_path)
except OSError:
pass
def assert_log_line(self, text, logs_list, expect_from_logging_mixin=False):
"""
Get Log Line and assert only 1 Entry exists with the given text. Also check that
"logging_mixin" line does not appear in that log line to avoid duplicate logging as below:
[2020-06-24 16:47:23,537] {logging_mixin.py:91} INFO - [2020-06-24 16:47:23,536] {python.py:135}
"""
log_lines = [log for log in logs_list if text in log]
assert len(log_lines) == 1
log_line = log_lines[0]
if not expect_from_logging_mixin:
# Logs from print statement still show with logging_mixing as filename
# Example: [2020-06-24 17:07:00,482] {logging_mixin.py:91} INFO - Log from Print statement
assert "logging_mixin.py" not in log_line
return log_line
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_external_executor_id_present_for_fork_run_task(self, mock_local_job):
args = self.parser.parse_args(self.task_args)
args.external_executor_id = "ABCD12345"
task_command.task_run(args)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
pickle_id=None,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=None,
external_executor_id="ABCD12345",
)
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_external_executor_id_present_for_process_run_task(self, mock_local_job):
args = self.parser.parse_args(self.task_args)
args.external_executor_id = "ABCD12345"
with mock.patch.dict(os.environ, {"external_executor_id": "12345FEDCBA"}):
task_command.task_run(args)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
pickle_id=None,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=None,
external_executor_id="ABCD12345",
)
@unittest.skipIf(not hasattr(os, 'fork'), "Forking not available")
def test_logging_with_run_task(self):
# We are not using self.assertLogs as we want to verify what actually is stored in the Log file
# as that is what gets displayed
with conf_vars({('core', 'dags_folder'): self.dag_path}):
task_command.task_run(self.parser.parse_args(self.task_args))
with open(self.ti_log_file_path) as l_file:
logs = l_file.read()
print(logs) # In case of a test failures this line would show detailed log
logs_list = logs.splitlines()
assert "INFO - Started process" in logs
assert f"Subtask {self.task_id}" in logs
assert "standard_task_runner.py" in logs
assert (
f"INFO - Running: ['airflow', 'tasks', 'run', '{self.dag_id}', "
f"'{self.task_id}', '{self.run_id}'," in logs
)
self.assert_log_line("Log from DAG Logger", logs_list)
self.assert_log_line("Log from TI Logger", logs_list)
self.assert_log_line("Log from Print statement", logs_list, expect_from_logging_mixin=True)
assert (
f"INFO - Marking task as SUCCESS. dag_id={self.dag_id}, "
f"task_id={self.task_id}, execution_date=20170101T000000" in logs
)
    # For this test memory spins out of control on Python 3.6. TODO(potiuk): FIXME
@pytest.mark.quarantined
@mock.patch("airflow.task.task_runner.standard_task_runner.CAN_FORK", False)
def test_logging_with_run_task_subprocess(self):
# We are not using self.assertLogs as we want to verify what actually is stored in the Log file
# as that is what gets displayed
with conf_vars({('core', 'dags_folder'): self.dag_path}):
task_command.task_run(self.parser.parse_args(self.task_args))
with open(self.ti_log_file_path) as l_file:
logs = l_file.read()
print(logs) # In case of a test failures this line would show detailed log
logs_list = logs.splitlines()
assert f"Subtask {self.task_id}" in logs
assert "base_task_runner.py" in logs
self.assert_log_line("Log from DAG Logger", logs_list)
self.assert_log_line("Log from TI Logger", logs_list)
self.assert_log_line("Log from Print statement", logs_list, expect_from_logging_mixin=True)
assert (
f"INFO - Running: ['airflow', 'tasks', 'run', '{self.dag_id}', "
f"'{self.task_id}', '{self.execution_date_str}'," in logs
)
assert (
f"INFO - Marking task as SUCCESS. dag_id={self.dag_id}, "
f"task_id={self.task_id}, execution_date=20170101T000000" in logs
)
def test_log_file_template_with_run_task(self):
"""Verify that the taskinstance has the right context for log_filename_template"""
with conf_vars({('core', 'dags_folder'): self.dag_path}):
# increment the try_number of the task to be run
with create_session() as session:
ti = session.query(TaskInstance).filter_by(run_id=self.run_id).first()
ti.try_number = 1
log_file_path = os.path.join(os.path.dirname(self.ti_log_file_path), "attempt=2.log")
try:
task_command.task_run(self.parser.parse_args(self.task_args))
assert os.path.exists(log_file_path)
finally:
try:
os.remove(log_file_path)
except OSError:
pass
@mock.patch.object(task_command, "_run_task_by_selected_method")
def test_root_logger_restored(self, run_task_mock):
"""Verify that the root logging context is restored"""
logger = logging.getLogger("foo.bar")
def task_inner(*args, **kwargs):
logger.warning("redirected log message")
run_task_mock.side_effect = task_inner
config = {
('core', 'dags_folder'): self.dag_path,
('logging', 'logging_level'): "INFO",
}
with conf_vars(config):
with self.assertLogs(level=logging.WARNING) as captured:
logger.warning("not redirected")
task_command.task_run(self.parser.parse_args(self.task_args))
assert captured.output == ["WARNING:foo.bar:not redirected"]
assert self.root_logger.level == logging.WARNING
assert self.root_logger.handlers == self.root_handlers
@pytest.mark.quarantined
@mock.patch.object(task_command, "_run_task_by_selected_method")
def test_disable_handler_modifying(self, run_task_mock):
"""If [core] donot_modify_handlers is set to True, the root logger is untouched"""
from airflow import settings
logger = logging.getLogger("foo.bar")
def task_inner(*args, **kwargs):
logger.warning("not redirected")
run_task_mock.side_effect = task_inner
config = {
('core', 'dags_folder'): self.dag_path,
('logging', 'logging_level'): "INFO",
}
old_value = settings.DONOT_MODIFY_HANDLERS
settings.DONOT_MODIFY_HANDLERS = True
with conf_vars(config):
with self.assertLogs(level=logging.WARNING) as captured:
task_command.task_run(self.parser.parse_args(self.task_args))
assert captured.output == ["WARNING:foo.bar:not redirected"]
settings.DONOT_MODIFY_HANDLERS = old_value
|
the-stack_106_23940 | # encoding: utf-8
"""
Custom element classes related to paragraphs (CT_P).
"""
from docx.enum.fields import WD_FIELD_TYPE
from ..ns import qn
from ..xmlchemy import BaseOxmlElement, OxmlElement, ZeroOrMore, ZeroOrOne
class CT_P(BaseOxmlElement):
"""
``<w:p>`` element, containing the properties and text for a paragraph.
"""
pPr = ZeroOrOne("w:pPr")
r = ZeroOrMore("w:r")
bookmarkStart = ZeroOrMore("w:bookmarkStart")
bookmarkEnd = ZeroOrMore("w:bookmarkEnd")
def add_bookmarkEnd(self, bookmark_id):
"""Return `w:bookmarkEnd` element added at end of document.
        The newly added `w:bookmarkEnd` element is linked to its `w:bookmarkStart`
counterpart by `bookmark_id`. It is the caller's responsibility to determine
`bookmark_id` matches that of the intended `bookmarkStart` element.
"""
bookmarkEnd = self._add_bookmarkEnd()
bookmarkEnd.id = bookmark_id
return bookmarkEnd
def add_bookmarkStart(self, name, bookmark_id):
"""Return `w:bookmarkStart` element added at end of document.
The newly added `w:bookmarkStart` element is identified by both `name` and
`bookmark_id`. It is the caller's responsibility to determine that both `name`
and `bookmark_id` are unique, document-wide.
"""
bookmarkStart = self._add_bookmarkStart()
bookmarkStart.name = name
bookmarkStart.id = bookmark_id
return bookmarkStart
    def add_field(self, fieldtype=WD_FIELD_TYPE.REF, switches=r"\h"):
"""Return a newly created ``<w:fldSimple>`` element containing a fieldcode."""
fld = self._add_fldsimple(
instr=WD_FIELD_TYPE.to_xml(fieldtype) + f" {switches}"
)
return fld
def _insert_pPr(self, pPr):
self.insert(0, pPr)
return pPr
def add_p_before(self):
"""
Return a new ``<w:p>`` element inserted directly prior to this one.
"""
new_p = OxmlElement('w:p')
self.addprevious(new_p)
return new_p
@property
def alignment(self):
"""
The value of the ``<w:jc>`` grandchild element or |None| if not
present.
"""
pPr = self.pPr
if pPr is None:
return None
return pPr.jc_val
@alignment.setter
def alignment(self, value):
pPr = self.get_or_add_pPr()
pPr.jc_val = value
def clear_content(self):
"""
Remove all child elements, except the ``<w:pPr>`` element if present.
"""
for child in self[:]:
if child.tag == qn('w:pPr'):
continue
self.remove(child)
def set_sectPr(self, sectPr):
"""
Unconditionally replace or add *sectPr* as a grandchild in the
correct sequence.
"""
pPr = self.get_or_add_pPr()
pPr._remove_sectPr()
pPr._insert_sectPr(sectPr)
@property
def style(self):
"""
String contained in w:val attribute of ./w:pPr/w:pStyle grandchild,
or |None| if not present.
"""
pPr = self.pPr
if pPr is None:
return None
return pPr.style
@style.setter
def style(self, style):
pPr = self.get_or_add_pPr()
pPr.style = style
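# Illustrative usage sketch (not part of this module; the `paragraph` object and the
# bookmark name/id values are hypothetical):
#     p = paragraph._p                       # underlying CT_P element
#     p.add_bookmarkStart("chapter-1", 0)    # caller keeps name/id unique document-wide
#     p.add_bookmarkEnd(0)                   # the matching id links the end to its start
#     p.add_field(WD_FIELD_TYPE.REF, switches=r"\h")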
|
the-stack_106_23941 | #
# Protocol diffing tool from http://github.com/dsjoerg/s2protocol
#
# Usage: s2_cli.py --diff 38215,38749
#
import sys
import argparse
import pprint
from zephyrus_sc2_parser.s2protocol_fixed.versions import build
def diff_things(typeinfo_index, thing_a, thing_b):
if type(thing_a) != type(thing_b):
print(
"typeinfo {} diff types: {} {}".format(
typeinfo_index, type(thing_a), type(thing_b)
)
)
return
if type(thing_a) == dict:
thing_a = thing_a.items()
thing_b = thing_b.items()
if type(thing_a) == list or type(thing_a) == tuple:
if len(thing_a) != len(thing_b):
print(
"typeinfo {} diff len: {} {}".format(
typeinfo_index, len(thing_a), len(thing_b)
)
)
else:
for ix in range(len(thing_a)):
diff_things(typeinfo_index, thing_a[ix], thing_b[ix])
elif thing_a != thing_b:
if type(thing_a) == int:
if (thing_a < 55 or thing_a - 1 != thing_b):
print(
"typeinfo {} diff number: {} {}".format(
typeinfo_index, thing_a, thing_b
)
)
else:
print(
"typeinfo {} diff string: {} {}".format(
typeinfo_index, thing_a, thing_b
)
)
def diff(protocol_a_ver, protocol_b_ver):
print(
"Comparing {} to {}".format(
protocol_a_ver, protocol_b_ver
)
)
protocol_a = build(protocol_a_ver)
protocol_b = build(protocol_b_ver)
count_a = len(protocol_a.typeinfos)
count_b = len(protocol_b.typeinfos)
print("Count of typeinfos: {} {}".format(count_a, count_b))
for index in range(max(count_a, count_b)):
if index >= count_a:
print("Protocol {} missing typeinfo {}".format(protocol_a_ver, index))
continue
if index >= count_b:
print("Protocol {} missing typeinfo {}".format(protocol_b_ver, index))
continue
a = protocol_a.typeinfos[index]
b = protocol_b.typeinfos[index]
diff_things(index, a, b)
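# Minimal CLI wiring sketch, assuming the "--diff" flag shown in the usage note above
# takes two build numbers separated by a comma. The flag name and parsing here are an
# assumption based on that note, not code taken from the real s2_cli.py.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Diff two s2protocol build versions')
    parser.add_argument('--diff', required=True, help='e.g. 38215,38749')
    args = parser.parse_args()
    ver_a, ver_b = (int(v) for v in args.diff.split(','))
    diff(ver_a, ver_b)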
|
the-stack_106_23942 | ''' Show a streaming, updating representation of Fourier Series.
The example was inspired by `this video`_.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve fourier_animated.py
at your command prompt. Then navigate to the URL
http://localhost:5006/fourier_animated
in your browser.
.. _this video: https://www.youtube.com/watch?v=LznjC4Lo7lE
'''
from collections import OrderedDict
import numpy as np
from bokeh.driving import repeat
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
pi = np.pi
N = 100
newx = x = np.linspace(0, 2*pi, N)
shift = 2.2
base_x = x + shift
period = pi/2
palette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']
def new_source():
return dict(
curve=ColumnDataSource(dict(x=[], base_x=[], y=[])),
lines=ColumnDataSource(dict(line_x=[], line_y=[], radius_x=[], radius_y=[])),
circle_point=ColumnDataSource(dict(x=[], y=[], r=[])),
circleds=ColumnDataSource(dict(x=[], y=[]))
)
def create_circle_glyphs(p, color, sources):
p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])
p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])
p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])
def create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):
if y_range is None:
y_range=[-2, 2]
# create new figure
p = figure(title=title, plot_width=800, plot_height=300, x_range=[-2, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
cx, cy = 0, 0
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i], cx, cy, i==0)
cp = sources['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i==0:
# compute the full fourier eq
full_y = sum(foo(x) for foo in foos)
# replace the foo curve with the full fourier eq
sources['curve'] = ColumnDataSource(dict(x=x, base_x=base_x, y=full_y))
# draw the line
p.line('base_x','y', color="orange", line_width=2, source=sources['curve'])
if i==len(foos)-1:
# if it's the last foo let's draw a circle on the head of the curve
sources['floating_point'] = ColumnDataSource({'x':[shift], 'y': [cy]})
p.line('line_x', 'line_y', color=palette[i], line_width=2, source=sources['lines'])
p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])
# draw the circle, radius and circle point related to foo domain
create_circle_glyphs(p, palette[i], sources)
_sources.append(sources)
return p, _sources
def get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):
if compute_curve:
ys = foo(xs)
sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)
r = foo(period)
y = foo(xs[0]) + cy
x = cfoo(xs[0]) + cx
sources['lines'].data = {
'line_x': [x, shift], 'line_y': [y, y],
'radius_x': [0, x], 'radius_y': [0, y]
}
sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}
sources['circleds'].data=dict(
x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,
y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,
)
def update_sources(sources, foos, newx, ind, cfoos):
cx, cy = 0, 0
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,
compute_curve = i != 0)
if i == 0:
full_y = sum(foo(newx) for foo in foos)
sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)
cp = sources[i]['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i == len(foos)-1:
sources[i]['floating_point'].data['x'] = [shift]
sources[i]['floating_point'].data['y'] = [cy]
def update_centric_sources(sources, foos, newx, ind, cfoos):
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i])
def create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):
p = figure(title=title, plot_width=800, plot_height=300, x_range=[-2, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i])
_sources.append(sources)
if i:
legend_label = "4sin(%(c)sx)/%(c)spi" % {'c': i*2+1}
else:
legend_label = "4sin(x)/pi"
p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])
p.line('line_x', 'line_y', color=palette[i], line_width=2,
source=sources['lines'], legend_label=legend_label)
create_circle_glyphs(p, palette[i], sources)
p.legend.location = "top_right"
p.legend.orientation = "horizontal"
p.legend.padding = 6
p.legend.margin = 6
p.legend.spacing = 6
return p, _sources
# create the series partials
f1 = lambda x: (4*np.sin(x))/pi
f2 = lambda x: (4*np.sin(3*x))/(3*pi)
f3 = lambda x: (4*np.sin(5*x))/(5*pi)
f4 = lambda x: (4*np.sin(7*x))/(7*pi)
cf1 = lambda x: (4*np.cos(x))/pi
cf2 = lambda x: (4*np.cos(3*x))/(3*pi)
cf3 = lambda x: (4*np.cos(5*x))/(5*pi)
cf4 = lambda x: (4*np.cos(7*x))/(7*pi)
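# These are the first four terms of the Fourier series of a square wave,
#   f(x) = (4/pi) * sum over odd n of sin(n*x)/n,   n = 1, 3, 5, 7,
# together with their cosine counterparts, which supply the x-coordinate of each
# harmonic's rotating point in get_new_sources().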
fourier = OrderedDict(
fourier_4 = {
'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),
'fs': [f1, f2, f3, f4],
'cfs': [cf1, cf2, cf3, cf4]
},
)
for k, p in fourier.items():
p['plot'], p['sources'] = create_plot(
p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']
)
for k, p in fourier.items():
p['cplot'], p['csources'] = create_centric_plot(
p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']
)
layout = column(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])
@repeat(range(N))
def cb(gind):
global newx
oldx = np.delete(newx, 0)
newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])
for k, p in fourier.items():
update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])
update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])
curdoc().add_periodic_callback(cb, 100)
curdoc().add_root(layout)
curdoc().title = "Fourier Animated"
|
the-stack_106_23943 | from __future__ import absolute_import
import sys
import numpy
import sklearn.preprocessing
import ctypes
import faiss
from ann_benchmarks.algorithms.base import BaseANN
from ann_benchmarks.algorithms.faiss import Faiss
class FaissIVF(Faiss):
def __init__(self, metric, n_list):
self._n_list = n_list
self._metric = metric
def fit(self, X):
if self._metric == 'angular':
X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
if X.dtype != numpy.float32:
X = X.astype(numpy.float32)
self.quantizer = faiss.IndexFlatL2(X.shape[1])
index = faiss.IndexIVFFlat(
self.quantizer, X.shape[1], self._n_list, faiss.METRIC_L2)
index.train(X)
index.add(X)
self.index = index
def set_query_arguments(self, n_probe):
faiss.cvar.indexIVF_stats.reset()
self._n_probe = n_probe
self.index.nprobe = self._n_probe
def get_additional(self):
return {"dist_comps": faiss.cvar.indexIVF_stats.ndis + # noqa
faiss.cvar.indexIVF_stats.nq * self._n_list}
def __str__(self):
return 'FaissIVF(n_list=%d, n_probe=%d)' % (self._n_list,
self._n_probe)
class FaissIVFPQ(Faiss):
def __init__(self, metric, n_list, n_M, n_bits):
self._metric = metric
self._n_list = n_list
self._n_M = n_M
self._n_bits = n_bits
self.name = 'FaissIVFPQ(n_list=%d, n_M=%d, n_bits=%d)' % (self._n_list, self._n_M, self._n_bits)
def fit(self, X):
# is this necessary?
if self._metric == 'angular':
X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
if X.dtype != numpy.float32:
X = X.astype(numpy.float32)
self.quantizer = faiss.IndexFlatL2(X.shape[1]) # can be changed in further
#index = faiss.IndexIVFPQ(
# self.quantizer, X.shape[1], self._n_list, self._n_M, self._n_bits, faiss.METRIC_L2)
index = faiss.index_factory(X.shape[1], f"IVF{self._n_list},PQ{self._n_M}x{self._n_bits}", faiss.METRIC_L2)
index.train(X[:250000])
index.add(X)
self.index = index
def set_query_arguments(self, n_probe):
faiss.cvar.indexIVF_stats.reset()
faiss.cvar.indexIVFPQ_stats.reset()
self._n_probe = n_probe
self.index.nprobe = self._n_probe
def __str__(self):
return 'FaissIVFPQ(n_list=%d, n_M=%d, n_bits=%d, n_probe=%d)' % (self._n_list, self._n_M, self._n_bits, self._n_probe)
class FaissIVFPQFS(Faiss):
def __init__(self, metric, n_list):
self._metric = metric
self._n_list = n_list
self.name = 'FaissIVFPQFS,RFlat(n_list=%d)' % (self._n_list)
def fit(self, X):
if self._metric == 'angular':
X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
if X.dtype != numpy.float32:
X = X.astype(numpy.float32)
M = X.shape[1] // 2
index_build_str = f"IVF{self._n_list},PQ{M}x4fs,RFlat"
print(f"index_build_str={index_build_str}")
index = faiss.index_factory(X.shape[1], index_build_str, faiss.METRIC_INNER_PRODUCT)
index.train(X[:250000])
index.add(X)
index_refine = faiss.IndexRefineFlat(index, faiss.swig_ptr(X))
self.index = index
self.index_refine = index_refine
def set_query_arguments(self, n_probe, n_reorder_k):
self._n_probe = n_probe
self._n_reorder_k = n_reorder_k
self.index.nprobe = self._n_probe
faiss.omp_set_num_threads(1)
def query(self, v, n):
if self._metric == 'angular':
v /= numpy.linalg.norm(v)
if self._n_reorder_k == 0:
D, I = self.index.search(
numpy.expand_dims(v, axis=0).astype(numpy.float32), n)
else:
self.index_refine.k_factor = self._n_reorder_k / n
D, I = self.index_refine.search(
numpy.expand_dims(v, axis=0).astype(numpy.float32), n)
return I[0]
def __str__(self):
return 'FaissIVFPQFS(n_list=%d, n_probe=%d, n_reorder_k=%d)' % (self._n_list, self._n_probe, self._n_reorder_k)
|
the-stack_106_23946 | import os
import sys
from copy import deepcopy
from distutils.core import Extension
from ..openmp_helpers import add_openmp_flags_if_available
from ..setup_helpers import _module_state, register_commands
IS_TRAVIS_LINUX = os.environ.get('TRAVIS_OS_NAME', None) == 'linux'
IS_APPVEYOR = os.environ.get('APPVEYOR', None) == 'True'
PY3_LT_35 = sys.version_info[0] == 3 and sys.version_info[1] < 5
_state = None
def setup_function(function):
    global _state
    _state = deepcopy(_module_state)
def teardown_function(function):
_module_state.clear()
    _module_state.update(_state)
def test_add_openmp_flags_if_available():
register_commands('openmp_testing', '0.0', False)
using_openmp = add_openmp_flags_if_available(Extension('test', []))
# Make sure that on Travis (Linux) and AppVeyor OpenMP does get used (for
# MacOS X usually it will not work but this will depend on the compiler).
# Having this is useful because we'll find out if OpenMP no longer works
# for any reason on platforms on which it does work at the time of writing.
# OpenMP doesn't work on Python 3.x where x<5 on AppVeyor though.
if IS_TRAVIS_LINUX or (IS_APPVEYOR and not PY3_LT_35):
assert using_openmp
|
the-stack_106_23947 | import zmq
import sys
import math
import numpy
class Broker:
context = zmq.Context()
router = context.socket(zmq.ROUTER)
#poller = zmq.Poller()
p = 0
def __init__(self, n):
self.op = {"WorkDone":self.serverResponse, "serverFREE":self.serverFree}
self.router.bind("tcp://*:5000")
        # elements needed to carry out the computation
self.n = n
self.bestK = None
self.obtenido = {}
#self.colaK = [1, self.n//2, self.n]
#self.colaK = [1, numpy.random.randint(2,self.n/2),numpy.random.randint(self.n/2,self.n + 1)]
self.colaK = [1,int(self.n/8), int(self.n/4 + 1)]
self.cantKCalculate = 0
def serverResponse(self, data):
print("el servidor acabo de evaluar un k")
print(data[1])
print(data)
#guardar el resultado en una estructura de datos, evaluar estructura de datos
kObtenido = int(data[2].decode())
ssdobtenido = float(data[3].decode())
if kObtenido in self.obtenido:
print("el k ya habia sido calculado")
else:
self.obtenido[kObtenido] = ssdobtenido
print("obtenido k: " , kObtenido, "su ssd:", ssdobtenido)
self.cantKCalculate += 1
def serverFree(self, data):
print("un servidor esta libre se le va asignar un k para que trabaje")
print(data[1])
#validar si no tengo que conseguir mas k
#enviar un mensaje diciendole al sever que termino para que no mande mas trabajo
#sacar k que necesito pedir de alguna estructura de datos
#msj = None
if self.cantKCalculate <= math.sqrt(self.n):
            if len(self.colaK): # there are k values left to send
                ktocalc = self.colaK.pop(0)
                msj = [data[0], b'KMEANS', str(ktocalc).encode()]#, b"probando"]
                self.router.send_multipart(msj)
                #self.p += 1  # one additional k
            else: # nothing to send right now; tell the server to wait
                msj = [data[0], b'WAIT', b"0"]#, b"probando"]
                self.router.send_multipart(msj)
                #self.p += 1  # one additional k
        else:
            print("the process has finished; no more work can be sent")
msj = [data[0], b'Finish', b"0"]#, b"probando"]
self.router.send_multipart(msj)
#self.p += 1
def run(self):
print("Running the server Broker....")
        while True: # change this: loop until the number of requested k values reaches sqrt(n)
            # print("checking whether a server has made a request")
if self.router.poll(100):
print("-----------un servidor ha hecho una solicitud--------------")
msj = self.router.recv_multipart()
#print("lo que me envia el server:", msj[1])
self.op[msj[1].decode()](msj)
            # check whether the k values collected so far are sufficient
            # keep track of the best k found so far
            # enqueue the next k values to request
if len(list(self.obtenido.keys())) >= 3:
print("calculando elbow")
a,b,c = self.elbow2()
print("k a buscar", a,b,c)
try:
self.colaK.append(numpy.random.randint(a,b+1))
self.colaK.append(numpy.random.randint(b, c+1))
#self.colaK.append(numpy.random.randint(1, 3000))
#self.colaK.append(numpy.random.randint(1, 3000))
except Exception as e:
print("hubo un erro y no se peuden agregar k a la cola")
#self.colaK.append(numpy.random.randint(l,m+1))
#self.colaK.append(numpy.random.randint(m, r+1))
            # sample from those ranges and add the k values to the queue
            print("best k so far:", self.bestK)
def dist(self, x, y):
return math.sqrt(x*x + y*y)
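    # calculoTheta below returns the angle between vectors (x1, y1) and (x2, y2):
    #   cos(theta) = (v1 . v2) / (|v1| * |v2|), clamped to [-1, 1] to absorb rounding error.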
def calculoTheta(self, x1, y1, x2, y2) :
        var = (x1*x2+y1*y2)/(self.dist(x1, y1)*self.dist(x2, y2))
        print("value passed to acos:", var)
if var > 1:
var = 1
if var < -1:
var = -1
res = math.acos(var)
print("el valor del theta calculado es:", res)
return res
def elbow2(self):
        listaOrdenada = list(self.obtenido.keys())  # the dict values are the y coordinates
        listaOrdenada.sort()  # the sorted keys are the x coordinates
l = 0
r = len(listaOrdenada) - 1
        k = (l+r)>>1  # midpoint (divide by two)
theta = self.calculoTheta(listaOrdenada[l]-listaOrdenada[k],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[k]],
listaOrdenada[r]-listaOrdenada[k],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[k]])
flag = True
while flag:
flag = False
            midI = math.ceil((k+l)/2)  # ceiling
midD = math.floor((k+r)/2)
thetaD = 4
thetaI = 4
orientation = 0
if midI < k:
thetaI = self.calculoTheta(listaOrdenada[l]-listaOrdenada[midI],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[midI]],
listaOrdenada[k]-listaOrdenada[midI],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[midI]])
if midD > k:
thetaD = self.calculoTheta(listaOrdenada[k]-listaOrdenada[midD],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[midD]],
listaOrdenada[r]-listaOrdenada[midD],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[midD]])
            # first check that the candidate indices are valid
            if (thetaD < theta) or (thetaI < theta):
                # keep whichever side gives the smaller angle
                print("positions")
print(l)
print(k)
print(r)
if thetaD < thetaI:
print("derecha")
print("mid", midD)
flag = True
theta = thetaD
l = k
k = midD
self.bestK = listaOrdenada[k]
orientation = 0
else:
print("izquierda")
print("mid", midI)
flag = True
theta = thetaI
r = k
k = midI
self.bestK = listaOrdenada[k]
orientation = 1
print("posiciones actualizadas")
print(l)
print(k)
print(r)
"""if orientation:
return listaOrdenada[k], listaOrdenada[r]
else:
return listaOrdenada[l], listaOrdenada[k]"""
print(listaOrdenada)
return listaOrdenada[l], listaOrdenada[k], listaOrdenada[r]
def elbow(self):
        listaOrdenada = list(self.obtenido.keys())  # the dict values are the y coordinates
        listaOrdenada.sort()  # the sorted keys are the x coordinates
l = 0
r = len(listaOrdenada) - 1
k = (l+r)>>1#dividir entre dos
self.bestK = k
# En la posicion 0 esta el 'x' y en la posicion 1 esta el 'y'
# calculamos el theta inicial
#theta = calculoTheta(listaOrdenada[l][0]-listaOrdenada[k][0], listaOrdenada[l][1]-listaOrdenada[k][1],listaOrdenada[r][0]-listaOrdenada[k][0], listaOrdenada[r][1]-listaOrdenada[k][1])
theta = self.calculoTheta(listaOrdenada[l]-listaOrdenada[k],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[k]],
listaOrdenada[r]-listaOrdenada[k],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[k]])
print("valor de thetha", theta)
flag = True
while(flag) :
flag = False
            #mid = (k+r)>>1  # floor
mid = math.floor((k+r)/2)
print("el valor de mid", mid)
print("el valor de r", r)
print("el valor de k", k)
print("el valor de l", l)
print(listaOrdenada)
print(list(self.obtenido.items()))
#auxmid = 0
#k mid r
            # compute the candidate theta on the right side
temp = self.calculoTheta(listaOrdenada[k]-listaOrdenada[mid],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[mid]],
listaOrdenada[r]-listaOrdenada[mid],
self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[mid]])
            # check whether the candidate theta is smaller than the current theta
if(theta > temp) :
flag = True
theta = temp
l = k
k = mid
self.bestK = k
            mid = math.ceil((k+l)/2)  # ceiling
            # compute the candidate theta on the left side
#temp = calculoTheta(listaOrdenada[l][0]-listaOrdenada[mid][0], listaOrdenada[l][1]-listaOrdenada[mid][1],
#listaOrdenada[k][0]-listaOrdenada[mid][0], listaOrdenada[k][1]-listaOrdenada[mid][1])
temp = self.calculoTheta(listaOrdenada[l]-listaOrdenada[mid],
self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[mid]],
listaOrdenada[k]-listaOrdenada[mid],
self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[mid]])
            # check whether this theta is smaller
if(theta > temp) :
flag = True
theta = temp
r = k
k = mid
self.bestK = k
#l2,k5,r9
return l,k,r
if __name__ == '__main__':
cantPoints = int(sys.argv[1])
print("cantidad de puntos:", cantPoints)
b = Broker(cantPoints)
b.run()
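# Example launch sketch (the script filename and point count are placeholders):
#   python broker.py 3000
# Worker processes are expected to connect to tcp://<broker-host>:5000 and use the
# "WorkDone" / "serverFREE" message protocol handled by Broker above.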
|
the-stack_106_23948 | import absl.flags
import absl.testing
import test_util
absl.flags.DEFINE_string("model", None, "model path to execute")
class ManualTest(test_util.TFLiteModelTest):
def __init__(self, *args, **kwargs):
super(ManualTest, self).__init__(
absl.flags.FLAGS.model, *args, **kwargs
)
def compare_results(self, iree_results, tflite_results, details):
super(ManualTest, self).compare_results(
iree_results, tflite_results, details
)
def test_compile_tflite(self):
if self.model_path is not None:
self.compile_and_execute()
if __name__ == "__main__":
absl.testing.absltest.main()
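# Example invocation sketch (the script name and model path are placeholders for the
# --model flag defined above):
#   python manual_test.py --model=/path/to/model.tflite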
|
the-stack_106_23949 | import midtransclient
# initialize core api client object
core = midtransclient.CoreApi(
is_production=False,
server_key='YOUR_SERVER_KEY',
client_key='YOUR_CLIENT_KEY'
)
# Alternative way to initialize CoreApi client object:
# core = midtransclient.CoreApi()
# core.api_config.set(
# is_production=False,
# server_key='YOUR_SERVER_KEY',
# client_key='YOUR_CLIENT_KEY'
# )
# Another alternative way to initialize CoreApi client object:
# core = midtransclient.CoreApi()
# core.api_config.is_production=False
# core.api_config.server_key='YOUR_SERVER_KEY'
# core.api_config.client_key='YOUR_CLIENT_KEY'
# IMPORTANT NOTE: You should obtain the credit card token on the frontend using `midtrans.min.js`, to avoid card-data breach risks on your backend
# ( refer to: https://api-docs.midtrans.com )
# prepare CORE API parameter to get credit card token
params = {
'card_number': '5264 2210 3887 4659',
'card_exp_month': '12',
'card_exp_year': '2020',
'card_cvv': '123',
'client_key': core.api_config.client_key,
}
card_token_response = core.card_token(params)
cc_token = card_token_response['token_id']
# prepare CORE API parameter to charge credit card ( refer to: https://api-docs.midtrans.com )
param = {
"payment_type": "credit_card",
"transaction_details": {
"gross_amount": 12145,
"order_id": "test-transaction-54321",
},
"credit_card":{
"token_id": cc_token
}
}
# charge transaction
charge_response = core.charge(param)
print('charge_response:')
print(charge_response)
# charge_response is dictionary representation of API JSON response
# sample:
# {
# 'approval_code': '1540370521462',
# 'bank': 'bni',
# 'card_type': 'debit',
# 'channel_response_code': '00',
# 'channel_response_message': 'Approved',
# 'currency': 'IDR',
# 'fraud_status': 'accept',
# 'gross_amount': '12145.00',
# 'masked_card': '526422-4659',
# 'order_id': 'test-transaction-54321',
# 'payment_type': 'credit_card',
# 'status_code': '200',
# 'status_message': 'Success, Credit Card transaction is successful',
# 'transaction_id': '2bc57149-b52b-46ff-b901-86418ad1abcc',
# 'transaction_status': 'capture',
# 'transaction_time': '2018-10-24 15:42:01'
# }
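# A minimal follow-up sketch using fields from the sample response above; the exact
# status-handling policy shown here is an assumption, not part of the midtransclient API.
transaction_status = charge_response['transaction_status']
fraud_status = charge_response.get('fraud_status')
if transaction_status == 'capture' and fraud_status == 'accept':
    print('Transaction captured and accepted by fraud detection')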
|
the-stack_106_23950 | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTests.test_hostAndPeer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTests.test_hostAndPeer} to test
that ITransport.getHost() and ITransport.getPeer() work for process transports.
"""
import sys
from twisted.internet import stdio, protocol
from twisted.python import reflect
class HostPeerChild(protocol.Protocol):
def connectionMade(self):
self.transport.write(
b"\n".join(
[
str(self.transport.getHost()).encode("ascii"),
str(self.transport.getPeer()).encode("ascii"),
]
)
)
self.transport.loseConnection()
def connectionLost(self, reason):
reactor.stop()
if __name__ == "__main__":
reflect.namedAny(sys.argv[1]).install()
from twisted.internet import reactor
stdio.StandardIO(HostPeerChild())
reactor.run() # type: ignore[attr-defined]
|
the-stack_106_23951 | """
Receives notifications from a remote queue.
"""
# pylint:disable=W0212
# pylint:disable=W0703
import threading
import logging
import json
from azure.servicebus import ServiceBusService
MESSAGE_WAIT_AFTER_ERROR = 5
MESSAGE_WAIT_TIMEOUT = 5
SBS_TOPIC_NAME = "webhooks"
SBS_SUBSCRIPTION_NAME = "RPiOneSubscription"
SBS_KEY_NAME = "ListenFromTopic"
class Client(object):
"""Client for ServiceBusService"""
def __init__(self, sbs_namespace, sbs_access_key):
if not sbs_namespace:
raise ValueError("'sbs_namespace' is required")
if not sbs_access_key:
raise ValueError("'sbs_access_key' is required")
self._logger = logging.getLogger(__name__)
self._sbs = ServiceBusService(service_namespace=sbs_namespace,
shared_access_key_name=SBS_KEY_NAME,
shared_access_key_value=sbs_access_key)
self._stop_event = None
self._thread = None
self._last_sequence = None
def start(self):
"""starts subscription"""
if not self._thread is None:
raise Exception("Client already started")
self._logger.info("Starting client for host %s", self._sbs._get_host())
self._stop_event = threading.Event()
self._thread = threading.Thread(target=self._receive_messages)
self._thread.daemon = True
self._thread.start()
def stop(self):
"""stops subscription"""
if self._thread is None:
raise Exception("Client is not started")
self._logger.info("Stopping client. May take up to %d seconds", MESSAGE_WAIT_TIMEOUT)
self._stop_event.set()
self._thread.join()
self._thread = None
self._stop_event = None
self._logger.info("Client stopped")
def _receive_messages(self):
"""Receieves messages from service"""
while not self._stop_event.is_set():
try:
message = self._sbs.receive_subscription_message(SBS_TOPIC_NAME,
SBS_SUBSCRIPTION_NAME,
timeout=MESSAGE_WAIT_TIMEOUT,
peek_lock=False)
except Exception:
self._logger.exception("Error while pulling message from topic")
self._stop_event.wait(MESSAGE_WAIT_AFTER_ERROR)
continue
if message is None or message.body is None:
self._logger.debug("No message received after waiting %d seconds",
MESSAGE_WAIT_TIMEOUT)
else:
sequence = message.broker_properties[u'SequenceNumber']
sent_on = message.broker_properties[u'EnqueuedTimeUtc']
body = message.body
self._logger.info("Message with sequence '%s' sent on '%s' receieved: %s",
sequence, sent_on, body)
if self._last_sequence > sequence:
self._logger.warning("Skipping message with sequence '%s' because the later"\
" one with sequence '%s' was already processed",
sequence, self._last_sequence)
else:
self._last_sequence = sequence
try:
self._process_message(body)
except Exception:
self._logger.exception("Failed to process a message")
def _process_message(self, message_body):
"""Process single message"""
parsed_message = json.loads(message_body)
msg_sender = parsed_message[u'name']
msg_text = parsed_message[u'text']
msg_type = parsed_message[u'type']
if not msg_sender or not msg_text or not msg_type:
raise ValueError("One of requried parameters is missing")
|
the-stack_106_23952 | # Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
#from scipy.misc import imread, imsave, imshow, imresize
import tensorflow as tf
from net import detxt_cpn as cpn
from utility import train_helper
from utility import mertric
from preprocessing import preprocessing
from preprocessing import dataset
import config
# hardware related configuration
tf.app.flags.DEFINE_integer(
'num_readers', 16,#16
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,#48
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../Datasets/tfrecords',#'/media/rs/0E06CD1706CD0127/Kapok/Chi/Datasets/tfrecords',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', '{}_????', 'The pattern of the dataset name to load.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_detxt_cpn/',
'The parent directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 100,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 3600,
'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
'train_image_size', 384,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'heatmap_size', 96,
'The size of the output heatmap of the model.')
tf.app.flags.DEFINE_float(
'heatmap_sigma', 1.,
'The sigma of Gaussian which generate the target heatmap.')
tf.app.flags.DEFINE_float(
'bbox_border', 25.,
    'The nearest distance of the crop border to all keypoints.')
tf.app.flags.DEFINE_integer(
'train_epochs', 50,
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'epochs_per_eval', 20,
'The number of training epochs to run between evaluations.')
tf.app.flags.DEFINE_integer(
'batch_size', 10,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_boolean(
'use_ohkm', True,
'Wether we will use the ohkm for hard keypoints.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
# optimizer related configuration
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180417, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 1e-5, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'mse_weight', 1., 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')#1e-3
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'warmup_learning_rate', 0.00001,
'The start warm-up learning rate to avoid NAN.')
tf.app.flags.DEFINE_integer(
'warmup_steps', 100,
'The total steps to warm-up.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
'decay_boundaries', '2, 3',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '1, 0.5, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model/seresnext50',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all'
'model_scope', None,
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'When restoring a checkpoint would ignore missing variables.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
'Wether we will train on cloud.')
tf.app.flags.DEFINE_string(
'cloud_checkpoint_path', 'seresnext50',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_boolean(
'seq_train', False,
'Wether we will train a sequence model.')
tf.app.flags.DEFINE_string(
'model_to_train', 'blouse, dress, outwear, skirt, trousers', #'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers',
'The sub-model to train (comma-separated list).')
FLAGS = tf.app.flags.FLAGS
#--model_scope=blouse --checkpoint_path=./logs/all --data_format=channels_last --batch_size=1
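# A fuller invocation sketch (the script filename and paths are placeholders; the flags
# themselves are defined above):
#   python train_detxt_cpn.py --data_dir=../Datasets/tfrecords --model_dir=./logs_detxt_cpn/ \
#       --model_to_train=blouse,dress --batch_size=10 --data_format=channels_first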
def input_pipeline(is_training=True, model_scope=FLAGS.model_scope, num_epochs=FLAGS.epochs_per_eval):
if 'all' in model_scope:
lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64),
tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0)
rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64),
tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1)
else:
lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64),
tf.constant(config.local_norm_lvalues, dtype=tf.int64)), 0)
rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64),
tf.constant(config.local_norm_rvalues, dtype=tf.int64)), 1)
preprocessing_fn = lambda org_image, classid, shape, key_x, key_y, key_v: preprocessing.preprocess_image(org_image, classid, shape, FLAGS.train_image_size, FLAGS.train_image_size, key_x, key_y, key_v, (lnorm_table, rnorm_table), is_training=is_training, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'), category=(model_scope if 'all' not in model_scope else '*'), bbox_border=FLAGS.bbox_border, heatmap_sigma=FLAGS.heatmap_sigma, heatmap_size=FLAGS.heatmap_size)
images, shape, classid, targets, key_v, isvalid, norm_value = dataset.slim_get_split(FLAGS.data_dir, preprocessing_fn, FLAGS.batch_size, FLAGS.num_readers, FLAGS.num_preprocessing_threads, num_epochs=num_epochs, is_training=is_training, file_pattern=FLAGS.dataset_name, category=(model_scope if 'all' not in model_scope else '*'), reader=None)
return images, {'targets': targets, 'key_v': key_v, 'shape': shape, 'classid': classid, 'isvalid': isvalid, 'norm_value': norm_value}
if config.PRED_DEBUG:
from scipy.misc import imread, imsave, imshow, imresize
def save_image_with_heatmap(image, height, width, heatmap_size, targets, pred_heatmap, indR, indG, indB):
if not hasattr(save_image_with_heatmap, "counter"):
save_image_with_heatmap.counter = 0 # it doesn't exist yet, so initialize it
save_image_with_heatmap.counter += 1
img_to_save = np.array(image.tolist()) + 128
#print(img_to_save.shape)
img_to_save = img_to_save.astype(np.uint8)
heatmap0 = np.sum(targets[indR, ...], axis=0).astype(np.uint8)
heatmap1 = np.sum(targets[indG, ...], axis=0).astype(np.uint8)
heatmap2 = np.sum(targets[indB, ...], axis=0).astype(np.uint8) if len(indB) > 0 else np.zeros((heatmap_size, heatmap_size), dtype=np.float32)
img_to_save = imresize(img_to_save, (height, width), interp='lanczos')
heatmap0 = imresize(heatmap0, (height, width), interp='lanczos')
heatmap1 = imresize(heatmap1, (height, width), interp='lanczos')
heatmap2 = imresize(heatmap2, (height, width), interp='lanczos')
img_to_save = img_to_save/2
img_to_save[:,:,0] = np.clip((img_to_save[:,:,0] + heatmap0 + heatmap2), 0, 255)
img_to_save[:,:,1] = np.clip((img_to_save[:,:,1] + heatmap1 + heatmap2), 0, 255)
#img_to_save[:,:,2] = np.clip((img_to_save[:,:,2]/4. + heatmap2), 0, 255)
file_name = 'targets_{}.jpg'.format(save_image_with_heatmap.counter)
imsave(os.path.join(config.DEBUG_DIR, file_name), img_to_save.astype(np.uint8))
pred_heatmap = np.array(pred_heatmap.tolist())
#print(pred_heatmap.shape)
for ind in range(pred_heatmap.shape[0]):
img = pred_heatmap[ind]
img = img - img.min()
img *= 255.0/img.max()
file_name = 'heatmap_{}_{}.jpg'.format(save_image_with_heatmap.counter, ind)
imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
return save_image_with_heatmap.counter
def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None):
predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])
pred_max = tf.reduce_max(predictions, axis=-1)
pred_indices = tf.argmax(predictions, axis=-1)
pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)
width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32)
if clip_at_zero:
pred_x, pred_y = pred_x * tf.cast(pred_max>0, tf.float32), pred_y * tf.cast(pred_max>0, tf.float32)
pred_x = pred_x * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (width / 2.)
pred_y = pred_y * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (height / 2.)
if config.PRED_DEBUG:
pred_indices_ = tf.squeeze(pred_indices)
image_ = tf.squeeze(image) * 255.
pred_heatmap = tf.one_hot(pred_indices_, heatmap_size*heatmap_size, on_value=1., off_value=0., axis=-1, dtype=tf.float32)
pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size])
if data_format == 'channels_first':
image_ = tf.transpose(image_, perm=(1, 2, 0))
save_image_op = tf.py_func(save_image_with_heatmap,
[image_, height, width,
heatmap_size,
tf.reshape(pred_heatmap * 255., [-1, heatmap_size, heatmap_size]),
tf.reshape(predictions, [-1, heatmap_size, heatmap_size]),
config.left_right_group_map[category][0],
config.left_right_group_map[category][1],
config.left_right_group_map[category][2]],
tf.int64, stateful=True)
with tf.control_dependencies([save_image_op]):
pred_x, pred_y = pred_x * 1., pred_y * 1.
return pred_x, pred_y
def gaussian_blur(inputs, inputs_filters, sigma, data_format, name=None):
with tf.name_scope(name, "gaussian_blur", [inputs]):
data_format_ = 'NHWC' if data_format=='channels_last' else 'NCHW'
if data_format_ == 'NHWC':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
ksize = int(6 * sigma + 1.)
x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1)
y = tf.transpose(x, [1, 0])
kernel_matrix = tf.exp(- ((x - ksize/2.) ** 2 + (y - ksize/2.) ** 2) / (2 * sigma ** 2))
#print(kernel_matrix)
kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1])
kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1])
#kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3])
outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format_, name='blur')
if data_format_ == 'NHWC':
outputs = tf.transpose(outputs, [0, 3, 1, 2])
return outputs
def keypoint_model_fn(features, labels, mode, params):
targets = labels['targets']
shape = labels['shape']
classid = labels['classid']
key_v = labels['key_v']
isvalid = labels['isvalid']
norm_value = labels['norm_value']
cur_batch_size = tf.shape(features)[0]
#features= tf.ones_like(features)
with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE):
pred_outputs = cpn.cascaded_pyramid_net(features, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], params['heatmap_size'], (mode == tf.estimator.ModeKeys.TRAIN), params['data_format'])
#print(pred_outputs)
if params['data_format'] == 'channels_last':
pred_outputs = [tf.transpose(pred_outputs[ind], [0, 3, 1, 2], name='outputs_trans_{}'.format(ind)) for ind in list(range(len(pred_outputs)))]
score_map = pred_outputs[-1]
pred_x, pred_y = get_keypoint(features, targets, score_map, params['heatmap_size'], params['train_image_size'], params['train_image_size'], (params['model_scope'] if 'all' not in params['model_scope'] else '*'), clip_at_zero=True, data_format=params['data_format'])
# this is important!!!
targets = 255. * targets
blur_list = [1., 1.37, 1.73, 2.4, None]#[1., 1.5, 2., 3., None]
#blur_list = [None, None, None, None, None]
targets_list = []
for sigma in blur_list:
if sigma is None:
targets_list.append(targets)
else:
            # targets are always channels-first here
targets_list.append(gaussian_blur(targets, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], sigma, params['data_format'], 'blur_{}'.format(sigma)))
# print(key_v)
#targets = tf.reshape(255.*tf.one_hot(tf.ones_like(key_v,tf.int64)*(params['heatmap_size']*params['heatmap_size']//2+params['heatmap_size']), params['heatmap_size']*params['heatmap_size']), [cur_batch_size,-1,params['heatmap_size'],params['heatmap_size']])
#norm_value = tf.ones_like(norm_value)
# score_map = tf.reshape(tf.one_hot(tf.ones_like(key_v,tf.int64)*(31*64+31), params['heatmap_size']*params['heatmap_size']), [cur_batch_size,-1,params['heatmap_size'],params['heatmap_size']])
#with tf.control_dependencies([pred_x, pred_y]):
ne_mertric = mertric.normalized_error(targets, score_map, norm_value, key_v, isvalid,
cur_batch_size,
config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')],
params['heatmap_size'],
params['train_image_size'])
# last_pred_mse = tf.metrics.mean_squared_error(score_map, targets,
# weights=1.0 / tf.cast(cur_batch_size, tf.float32),
# name='last_pred_mse')
# filter all invisible keypoint maybe better for this task
# all_visible = tf.logical_and(key_v>0, isvalid>0)
# targets_list = [tf.boolean_mask(targets_list[ind], all_visible) for ind in list(range(len(targets_list)))]
# pred_outputs = [tf.boolean_mask(pred_outputs[ind], all_visible, name='boolean_mask_{}'.format(ind)) for ind in list(range(len(pred_outputs)))]
all_visible = tf.expand_dims(tf.expand_dims(tf.cast(tf.logical_and(key_v>0, isvalid>0), tf.float32), axis=-1), axis=-1)
targets_list = [targets_list[ind] * all_visible for ind in list(range(len(targets_list)))]
pred_outputs = [pred_outputs[ind] * all_visible for ind in list(range(len(pred_outputs)))]
sq_diff = tf.reduce_sum(tf.squared_difference(targets, pred_outputs[-1]), axis=-1)
last_pred_mse = tf.metrics.mean_absolute_error(sq_diff, tf.zeros_like(sq_diff), name='last_pred_mse')
metrics = {'normalized_error': ne_mertric, 'last_pred_mse':last_pred_mse}
predictions = {'normalized_error': ne_mertric[1]}
ne_mertric = tf.identity(ne_mertric[1], name='ne_mertric')
base_learning_rate = params['learning_rate']
mse_loss_list = []
if params['use_ohkm']:
base_learning_rate = 1. * base_learning_rate
for pred_ind in list(range(len(pred_outputs) - 1)):
mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind],
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
scope='loss_{}'.format(pred_ind),
loss_collection=None,#tf.GraphKeys.LOSSES,
# mean all elements of all pixels in all batch
reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements
temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1)
num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2
gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1]
gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1])
gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1))
select_targets = tf.gather_nd(targets_list[-1], gather_indcies)
select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies)
mse_loss_list.append(tf.losses.mean_squared_error(select_targets, select_heatmap,
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
scope='loss_{}'.format(len(pred_outputs) - 1),
loss_collection=None,#tf.GraphKeys.LOSSES,
# mean all elements of all pixels in all batch
reduction=tf.losses.Reduction.MEAN))
else:
for pred_ind in list(range(len(pred_outputs))):
mse_loss_list.append(tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind],
weights=1.0 / tf.cast(cur_batch_size, tf.float32),
scope='loss_{}'.format(pred_ind),
loss_collection=None,#tf.GraphKeys.LOSSES,
# mean all elements of all pixels in all batch
reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements
mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss')
tf.summary.scalar('mse', mse_loss)
tf.losses.add_loss(mse_loss)
# bce_loss_list = []
# for pred_ind in list(range(len(pred_outputs))):
# bce_loss_list.append(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred_outputs[pred_ind], labels=targets_list[pred_ind]/255., name='loss_{}'.format(pred_ind)), name='loss_mean_{}'.format(pred_ind)))
# mse_loss = tf.multiply(params['mse_weight'] / params['num_stacks'], tf.add_n(bce_loss_list), name='mse_loss')
# tf.summary.scalar('mse', mse_loss)
# tf.losses.add_loss(mse_loss)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
tf.summary.scalar('loss', total_loss)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions, eval_metric_ops=metrics)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['warmup_learning_rate']] + [base_learning_rate * decay for decay in params['lr_decay_factors']]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[params['warmup_steps']] + [int(float(ep)*params['steps_per_epoch']) for ep in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate')
tf.summary.scalar('lr', truncated_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
momentum=params['momentum'])
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics,
scaffold=tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold_(params['checkpoint_path'], params['model_dir'], params['checkpoint_exclude_scopes'], params['model_scope'], params['checkpoint_model_scope'], params['ignore_missing_vars'])))
def parse_comma_list(args):
return [float(s.strip()) for s in args.split(',')]
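# For reference: parse_comma_list('1, 0.5, 0.1') evaluates to [1.0, 0.5, 0.1].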
def sub_loop(model_fn, model_scope, model_dir, run_config, train_epochs, epochs_per_eval, lr_decay_factors, decay_boundaries, checkpoint_path=None, checkpoint_exclude_scopes='', checkpoint_model_scope='', ignore_missing_vars=True):
steps_per_epoch = config.split_size[(model_scope if 'all' not in model_scope else '*')]['train'] // FLAGS.batch_size
fashionAI = tf.estimator.Estimator(
model_fn=model_fn, model_dir=model_dir, config=run_config,
params={
'checkpoint_path': checkpoint_path,
'model_dir': model_dir,
'checkpoint_exclude_scopes': checkpoint_exclude_scopes,
'model_scope': model_scope,
'checkpoint_model_scope': checkpoint_model_scope,
'ignore_missing_vars': ignore_missing_vars,
'train_image_size': FLAGS.train_image_size,
'heatmap_size': FLAGS.heatmap_size,
'data_format': FLAGS.data_format,
'steps_per_epoch': steps_per_epoch,
'use_ohkm': FLAGS.use_ohkm,
'batch_size': FLAGS.batch_size,
'weight_decay': FLAGS.weight_decay,
'mse_weight': FLAGS.mse_weight,
'momentum': FLAGS.momentum,
'learning_rate': FLAGS.learning_rate,
'end_learning_rate': FLAGS.end_learning_rate,
'warmup_learning_rate': FLAGS.warmup_learning_rate,
'warmup_steps': FLAGS.warmup_steps,
'decay_boundaries': parse_comma_list(decay_boundaries),
'lr_decay_factors': parse_comma_list(lr_decay_factors),
})
tf.gfile.MakeDirs(model_dir)
tf.logging.info('Starting to train model {}.'.format(model_scope))
for _ in range(train_epochs // epochs_per_eval):
tensors_to_log = {
'lr': 'learning_rate',
'loss': 'total_loss',
'mse': 'mse_loss',
'ne': 'ne_mertric',
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps, formatter=lambda dicts: '{}:'.format(model_scope) + (', '.join(['%s=%.6f' % (k, v) for k, v in dicts.items()])))
tf.logging.info('Starting a training cycle.')
fashionAI.train(input_fn=lambda : input_pipeline(True, model_scope, epochs_per_eval), hooks=[logging_hook], max_steps=(steps_per_epoch*train_epochs))
tf.logging.info('Starting to evaluate.')
eval_results = fashionAI.evaluate(input_fn=lambda : input_pipeline(False, model_scope, 1))
tf.logging.info(eval_results)
tf.logging.info('Finished model {}.'.format(model_scope))
def main(_):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction)
sess_config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options)
# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(
save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(
save_checkpoints_steps=None).replace(
save_summary_steps=FLAGS.save_summary_steps).replace(
keep_checkpoint_max=5).replace(
tf_random_seed=FLAGS.tf_random_seed).replace(
log_step_count_steps=FLAGS.log_every_n_steps).replace(
session_config=sess_config)
if FLAGS.seq_train:
detail_params = {
'all': {
'model_dir' : os.path.join(FLAGS.model_dir, 'all'),
'train_epochs': 6,
'epochs_per_eval': 4,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '3, 4',
'model_scope': 'all',
'checkpoint_path': None,
'checkpoint_model_scope': '',
'checkpoint_exclude_scopes': '',
'ignore_missing_vars': True,
},
'blouse': {
'model_dir' : os.path.join(FLAGS.model_dir, 'blouse'),
'train_epochs': 50,
'epochs_per_eval': 30,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '15, 30',
'model_scope': 'blouse',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'blouse/additional_layer, blouse/feature_pyramid/conv_heatmap, blouse/global_net/conv_heatmap',
'ignore_missing_vars': True,
},
'dress': {
'model_dir' : os.path.join(FLAGS.model_dir, 'dress'),
'train_epochs': 50,
'epochs_per_eval': 30,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '15, 30',
'model_scope': 'dress',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'dress/additional_layer, dress/feature_pyramid/conv_heatmap, dress/global_net/conv_heatmap',
'ignore_missing_vars': True,
},
'outwear': {
'model_dir' : os.path.join(FLAGS.model_dir, 'outwear'),
'train_epochs': 50,
'epochs_per_eval': 30,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '15, 30',
'model_scope': 'outwear',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'outwear/additional_layer, outwear/feature_pyramid/conv_heatmap, outwear/global_net/conv_heatmap',
'ignore_missing_vars': True,
},
'skirt': {
'model_dir' : os.path.join(FLAGS.model_dir, 'skirt'),
'train_epochs': 50,
'epochs_per_eval': 30,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '15, 30',
'model_scope': 'skirt',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'skirt/additional_layer, skirt/feature_pyramid/conv_heatmap, skirt/global_net/conv_heatmap',
'ignore_missing_vars': True,
},
'trousers': {
'model_dir' : os.path.join(FLAGS.model_dir, 'trousers'),
'train_epochs': 50,
'epochs_per_eval': 30,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '15, 30',
'model_scope': 'trousers',
'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'),
'checkpoint_model_scope': 'all',
'checkpoint_exclude_scopes': 'trousers/additional_layer, trousers/feature_pyramid/conv_heatmap, trousers/global_net/conv_heatmap',
'ignore_missing_vars': True,
},
}
else:
detail_params = {
'blouse': {
'model_dir' : os.path.join(FLAGS.model_dir, 'blouse'),
'train_epochs': 28,
'epochs_per_eval': 7,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '10, 20',
'model_scope': 'blouse',
'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.cloud_checkpoint_path) if FLAGS.run_on_cloud else FLAGS.checkpoint_path,
'checkpoint_model_scope': '',
'checkpoint_exclude_scopes': 'blouse/additional_layer, blouse/feature_pyramid, blouse/global_net',
'ignore_missing_vars': True,
},
'dress': {
'model_dir' : os.path.join(FLAGS.model_dir, 'dress'),
'train_epochs': 28,
'epochs_per_eval': 7,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '10, 20',
'model_scope': 'dress',
'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.cloud_checkpoint_path) if FLAGS.run_on_cloud else FLAGS.checkpoint_path,
'checkpoint_model_scope': '',
'checkpoint_exclude_scopes': 'dress/additional_layer, dress/feature_pyramid, dress/global_net',
'ignore_missing_vars': True,
},
'outwear': {
'model_dir' : os.path.join(FLAGS.model_dir, 'outwear'),
'train_epochs': 28,
'epochs_per_eval': 7,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '10, 20',
'model_scope': 'outwear',
'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.cloud_checkpoint_path) if FLAGS.run_on_cloud else FLAGS.checkpoint_path,
'checkpoint_model_scope': '',
'checkpoint_exclude_scopes': 'outwear/additional_layer, outwear/feature_pyramid, outwear/global_net',
'ignore_missing_vars': True,
},
'skirt': {
'model_dir' : os.path.join(FLAGS.model_dir, 'skirt'),
'train_epochs': 28,
'epochs_per_eval': 7,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '10, 20',
'model_scope': 'skirt',
'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.cloud_checkpoint_path) if FLAGS.run_on_cloud else FLAGS.checkpoint_path,
'checkpoint_model_scope': '',
'checkpoint_exclude_scopes': 'skirt/additional_layer, skirt/feature_pyramid, skirt/global_net',
'ignore_missing_vars': True,
},
'trousers': {
'model_dir' : os.path.join(FLAGS.model_dir, 'trousers'),
'train_epochs': 28,
'epochs_per_eval': 7,
'lr_decay_factors': '1, 0.5, 0.1',
'decay_boundaries': '10, 20',
'model_scope': 'trousers',
'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.cloud_checkpoint_path) if FLAGS.run_on_cloud else FLAGS.checkpoint_path,
'checkpoint_model_scope': '',
'checkpoint_exclude_scopes': 'trousers/additional_layer, trousers/feature_pyramid, trousers/global_net',
'ignore_missing_vars': True,
},
}
model_to_train = [s.strip() for s in FLAGS.model_to_train.split(',')]
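  # e.g. --model_to_train='blouse, dress' runs sub_loop for each listed key of detail_params in order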
for m in model_to_train:
    sub_loop(keypoint_model_fn, m, detail_params[m]['model_dir'], run_config,
             detail_params[m]['train_epochs'], detail_params[m]['epochs_per_eval'],
             detail_params[m]['lr_decay_factors'], detail_params[m]['decay_boundaries'],
             detail_params[m]['checkpoint_path'], detail_params[m]['checkpoint_exclude_scopes'],
             detail_params[m]['checkpoint_model_scope'], detail_params[m]['ignore_missing_vars'])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
# 0.0433054647096145
# blouse: 0.04104559849382152
# dress: 0.040321287576354434
# outwear: 0.04271434626552231
# skirt: 0.054697498510954054
# trousers: 0.04762229379563965
|
the-stack_106_23954 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
import re
import time
import os
from sqlalchemy import create_engine
import pandas as pd
import requests
__author__ = 'berniey'
re_limit = re.compile(r"^[0-9]*:[0-9]*")
re_ip = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
re_time = re.compile(r"^\d{4}\-\d{2}\-\d{2}\s\d{2}\:\d{2}\:\d{2}$")
engine_mysql = create_engine("mysql+pymysql://{}:{}@localhost:3306/cdnlog".format
(os.environ.get('mysql_role'), os.environ.get('mysql_password')))
engine_pg = create_engine("postgresql://{}:{}@localhost:5432/cdnlog".format(
os.environ.get('pg_role'), os.environ.get('pg_password')))
series_to_frame_by_kind = {
'get_ip_traffic': (['ip'], 'traffic'),
'get_ip_count': (['ip'], 'count'),
'get_url_traffic': (['url'], 'traffic'),
'get_url_count': (['url'], 'count'),
'get_code_count': (['code'], 'count'),
'get_url_code_count': (['url', 'code'], 'count'),
'get_ip_code_count': (['ip', 'code'], 'count'),
'get_ip_url_code_count': (['ip', 'url', 'code'], 'count'),
'get_time_traffic_count': (['time'], 'traffic'),
}
def singleton(cls, *args, **kw):
    """
    Decorator that turns the decorated class into a singleton:
        @singleton
        class Fun(object): ...
    Every later call to Fun() returns the same cached instance.
    """
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
class SingletonMetaclass(type):
"""
Singleton Metaclass
__metaclass__ = SingletonMetaclass
"""
_inst = {}
    def __call__(cls, *args, **kwargs):
        if cls not in cls._inst:
            cls._inst[cls] = super(SingletonMetaclass, cls).__call__(*args, **kwargs)
return cls._inst[cls]
def traffic_decimal(x, pos):
"""
:param x: value
:param pos: placeholder
:return: diff unit abbreviation
"""
if x <= 1000:
return '{:1.0f}'.format(x)
elif 1000 < x <= 1000000:
return '{:1.0f}K'.format(x*1e-3)
elif 1000000 < x <= 1000000000:
return '{:1.1f}M'.format(x*1e-6)
    elif 1000000000 < x <= 1000000000000:
        return '{:1.2f}G'.format(x*1e-9)
    elif x > 1000000000000:
        return '{:1.3f}T'.format(x * 1e-12)
return '{:1.0f}WTF'.format(x)
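# traffic_decimal(x, pos) follows matplotlib's tick-formatter signature; a minimal
# sketch of the presumably intended usage (assuming 'ax' is an existing Axes):
#   from matplotlib.ticker import FuncFormatter
#   ax.yaxis.set_major_formatter(FuncFormatter(traffic_decimal))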
def data_after_argument(aim_data, *args, **kwargs):
    """
    Slice aim_data according to kwargs['limit'] = (start, end):
    if the limit string did not match the pattern, everything is returned;
    ':y' keeps the top y items, 'x:' keeps the items from index x onwards,
    and 'x:y' keeps items x to y.
    """
l1 = kwargs.get('limit')[0]
l2 = kwargs.get('limit')[1]
if l1 >= 0 and l2:
return aim_data[l1:l2]
if l1 >= 0 and not l2:
return aim_data[l1:]
if not l1 and l2 >= 0:
return aim_data[:l2]
else:
return aim_data
def parse_limit(limit):
if not re_limit.match(limit):
return 0, 0
a = limit.split(':')
a1 = int(a[0]) if a[0] != '' else 0
a2 = int(a[1]) if a[1] != '' else None
return a1, a2
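# For example:
#   parse_limit(':20')  -> (0, 20)     # top 20 items
#   parse_limit('5:')   -> (5, None)   # from index 5 onwards
#   parse_limit('3:8')  -> (3, 8)
#   parse_limit('abc')  -> (0, 0)      # no match, keep everything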
def parse_requests(request):
error = {}
    graphic_kinds = ['line', 'hist', 'area', 'bar', 'barh', 'kde', 'pie']
kind = request.args.get('kind', 'line')
limit = request.args.get('limit', ':')
referer = request.args.get('referer', '')
use_index = request.args.get('use_index', True)
is_show = request.args.get('is_show', None)
dis_tick = request.args.get('dis_tick', '')
ip = request.args.get('ip', '')
start_time = request.args.get('start_time', '')
end_time = request.args.get('end_time', '')
is_qiniu = request.args.get('is_qiniu', 'True')
if kind not in graphic_kinds:
error['error_kind'] = "you must have a choice among 'line','hist', 'bar', 'barh', 'kde', 'pie' or 'area'"
if use_index in ['False', 'false', 'FALSE']:
use_index = False
if dis_tick:
if kind == 'barh':
dis_tick = 'y'
else:
dis_tick = 'x'
if ip and not re_ip.match(ip):
error['error_ip'] = "Please fill a Correct ip"
if start_time and not re_time.match(start_time):
error['error_start_time'] = "please fill a CORRECT start_time"
if end_time and not re_time.match(end_time):
error['error_end_time'] = "please fill a CORRECT end_time"
return error, kind, limit, use_index, is_show, dis_tick, ip, referer, start_time, end_time, is_qiniu
def convert_time_format(request_time):
    """
    Convert a GMT access-log timestamp to Beijing time.
    :return: time string in '%Y-%m-%d %H:%M:%S' format
    """
struct_time = time.strptime(request_time, "[%d/%b/%Y:%X+0800]")
timestamp = time.mktime(struct_time) + 28800
time_array = time.localtime(timestamp)
time_date = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
    # If the timestamp is also needed (and no time-based plotting), return it as well:
# return time_date, timestamp
return time_date
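# For example, assuming the host runs in UTC+8 and the log timestamps are GMT
# (the literal '+0800' is only matched, not interpreted, by the format string):
#   convert_time_format("[25/Dec/2017:02:00:00+0800]")  # -> '2017-12-25 10:00:00'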
def save_data(data, data_kind, save_kind, path_or_table):
"""
Store data by arg
"""
if save_kind in ['mysql', 'pg', 'postgresql']:
if not path_or_table:
path_or_table = data_kind+'_'+str(time.time())
_save_database(data, data_kind, save_kind, path_or_table)
if save_kind == 'excel':
_save_file(data, data_kind, path_or_table, file_kinds='excel')
if save_kind == 'csv':
_save_file(data, data_kind, path_or_table, file_kinds='csv')
def print_summary_information(d, num=20):
    stats = d.get_code_count(limit=parse_limit(':{}'.format(num))).to_dict()
    print("\nLog summary")
    print('*'*50+'\n')
    print("Request count per status code\n")
    print("code   requests")
    for key, value in stats.items():
        print("{:<4} : {:10}".format(key, value))
    print("\n" + '*'*50 + "\n")
    print("Top {} urls by traffic\n".format(num))
    print("{:<20}{}".format("traffic(b)", "url"))
    stats = d.get_url_traffic(limit=parse_limit(':{}'.format(num))).to_dict()
    for key, value in stats.items():
        print("{:<20} : {:10}".format(value, key))
    print("\n" + '*'*50 + "\n")
    print("Top {} urls by request count\n".format(num))
    print("{:<20}{}".format("requests", "url"))
    stats = d.get_url_count(limit=parse_limit(':{}'.format(num))).to_dict()
    for key, value in stats.items():
        print("{:<20} : {:10}".format(value, key))
    print("\n" + '*'*50 + "\n")
    print("Top {} IPs by traffic\n".format(num))
    print("{:<20}{}".format("traffic(b)", "IP"))
    stats = d.get_ip_traffic(limit=parse_limit(':{}'.format(num))).to_dict()
    for key, value in stats.items():
        print("{:<20} : {:10}".format(value, key))
    print("\n" + '*'*50 + "\n")
    print("Top {} IPs by request count\n".format(num))
    print("{:<20}{}".format("requests", "IP"))
    stats = d.get_ip_count(limit=parse_limit(':{}'.format(num))).to_dict()
    for key, value in stats.items():
        print("{:<20} : {:10}".format(value, key))
def _save_file(data, data_kind, path, file_kinds):
columns_value = series_to_frame_by_kind.get(data_kind)
if columns_value:
data = series_to_dataframe(data, columns_value)
path = _path_and_mkdir(path)
if file_kinds == 'excel':
from openpyxl import load_workbook
if os.path.isfile(path):
with pd.ExcelWriter(path, engine='openpyxl') as writer:
writer.book = load_workbook(path)
data.to_excel(writer, data_kind)
else:
out = pd.ExcelWriter(path)
data.to_excel(out, data_kind)
out.save()
if file_kinds == 'csv':
data.to_csv(path)
def _save_database(data, data_kind, save_kind, table_name):
columns_value = series_to_frame_by_kind.get(data_kind)
#选择数据库引擎
engine = engine_mysql if save_kind == 'mysql' else engine_pg
if columns_value:
data = series_to_dataframe(data, columns_value)
data.to_sql(table_name, engine, if_exists='replace')
def series_to_dataframe(data, columns_value):
df = pd.DataFrame([i for i in data.index], columns=columns_value[0])
df[columns_value[1]] = data.values
return df
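# For example, given a Series like
#   pd.Series([1024, 512], index=['1.2.3.4', '5.6.7.8'])
# and columns_value = (['ip'], 'traffic'), this returns a DataFrame with an
# 'ip' column built from the index and a 'traffic' column holding the values.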
def _path_and_mkdir(path):
"""
if dir not exist, make one
"""
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
return path
def ip_address(ip):
    """
    Look up the IP's location from both the Taobao and Sina data sources (via ipip.net).
    """
sina_data = {"ip": ip,
"type": "sina"
}
sina_headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
sina_r = requests.post("https://www.ipip.net/ip/ajax/", data=sina_data, headers=sina_headers)
sina_district = (eval(sina_r.content.decode()))
taobao_data = {"ip": ip,
"type": "taobao"
}
taobao_headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
taobao_r = requests.post("https://www.ipip.net/ip/ajax/", data=taobao_data, headers=taobao_headers)
taobao_district = (eval(taobao_r.content.decode()))
return {"taobao": taobao_district, "sina": sina_district}
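# Example usage (requires network access to ipip.net; the exact payload shape
# depends on the service):
#   ip_address("8.8.8.8")  # -> {"taobao": {...}, "sina": {...}}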
|
the-stack_106_23955 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of AEA agents project configuration."""
import os
from copy import deepcopy
from pathlib import Path
from shutil import rmtree
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.cli.fetch import do_fetch
from aea.cli.issue_certificates import issue_certificates_
from aea.cli.utils.context import Context
from aea.configurations.base import AgentConfig, PublicId
from aea.configurations.constants import DEFAULT_REGISTRY_NAME
from aea.configurations.data_types import ComponentId
from aea.configurations.manager import AgentConfigManager
from aea.crypto.helpers import create_private_key, get_wallet_from_agent_config
from aea.exceptions import AEAValidationError, enforce
class _Base:
"""Base class to share some methods."""
@classmethod
def _get_agent_config(cls, path: Union[Path, str]) -> AgentConfig:
"""Get agent config instance."""
agent_config = AEABuilder.try_to_load_agent_configuration_file(path)
agent_config.check_aea_version()
return agent_config
@classmethod
def _get_builder(
cls,
agent_config: AgentConfig,
aea_project_path: Union[Path, str],
skip_consistency_check: bool = False,
) -> AEABuilder:
"""Get AEABuilder instance."""
builder = AEABuilder(
with_default_packages=False, build_dir_root=str(aea_project_path)
)
builder.set_from_configuration(
agent_config, Path(aea_project_path), skip_consistency_check
)
return builder
@property
def builder(self) -> AEABuilder:
"""Get AEABuilder instance."""
raise NotImplementedError # pragma: nocover
def install_pypi_dependencies(self) -> None:
"""Install python dependencies for the project."""
self.builder.install_pypi_dependencies()
class Project(_Base):
"""Agent project representation."""
__slots__ = ("public_id", "path", "agents")
def __init__(self, public_id: PublicId, path: str) -> None:
"""Init project with public_id and project's path."""
self.public_id: PublicId = public_id
self.path: str = path
self.agents: Set[str] = set()
def build(self) -> None:
"""Call all build entry points."""
self.builder.call_all_build_entrypoints()
@classmethod
def load(
cls,
working_dir: str,
public_id: PublicId,
is_local: bool = False,
is_remote: bool = False,
is_restore: bool = False,
cli_verbosity: str = "INFO",
registry_path: str = DEFAULT_REGISTRY_NAME,
skip_consistency_check: bool = False,
) -> "Project":
"""
Load project with given public_id to working_dir.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
        :param working_dir: the working directory
        :param public_id: the public id
        :param is_local: whether to fetch from local
        :param is_remote: whether to fetch from remote
        :param is_restore: whether this is a restore (skip fetching the project)
        :param cli_verbosity: the logging verbosity of the CLI
        :param registry_path: the path to the registry locally
        :param skip_consistency_check: consistency checks flag
        :return: the loaded Project
        """
ctx = Context(
cwd=working_dir, verbosity=cli_verbosity, registry_path=registry_path
)
ctx.set_config("skip_consistency_check", skip_consistency_check)
path = os.path.join(working_dir, public_id.author, public_id.name)
target_dir = os.path.join(public_id.author, public_id.name)
if not is_restore and not os.path.exists(target_dir):
do_fetch(ctx, public_id, is_local, is_remote, target_dir=target_dir)
return cls(public_id, path)
def remove(self) -> None:
"""Remove project, do cleanup."""
rmtree(self.path)
@property
def builder(self) -> AEABuilder:
"""Get builder instance."""
return self._get_builder(self._get_agent_config(self.path), self.path)
def check(self) -> None:
"""Check we can still construct an AEA from the project with builder.build."""
_ = self.builder
class AgentAlias(_Base):
"""Agent alias representation."""
__slots__ = ("project", "agent_name", "_data_dir", "_agent_config")
def __init__(
self,
project: Project,
agent_name: str,
data_dir: str,
password: Optional[str] = None,
):
"""Init agent alias with project, config, name, agent, builder."""
self.project = project
self.agent_name = agent_name
self._data_dir = data_dir
if not os.path.exists(self._data_dir):
os.makedirs(self._data_dir)
self._agent_config: AgentConfig = self._get_agent_config(project.path)
self._password = password
self._ensure_private_keys()
def set_agent_config_from_data(self, json_data: List[Dict]) -> None:
"""
Set agent config instance constructed from json data.
:param json_data: agent config json data
:return: None
"""
self._agent_config = AEABuilder.loader.load_agent_config_from_json(json_data)
self._ensure_private_keys()
def _ensure_private_keys(self) -> None:
"""Add private keys if not present in the config."""
builder = self._get_builder(self.agent_config, self.project.path)
default_ledger = builder.get_default_ledger()
required_ledgers = builder.get_required_ledgers()
enforce(
default_ledger in required_ledgers,
exception_text=f"Default ledger '{default_ledger}' not in required ledgers: {required_ledgers}",
exception_class=AEAValidationError,
)
available_private_keys = self.agent_config.private_key_paths.keys()
available_connection_private_keys = (
self.agent_config.connection_private_key_paths.keys()
)
for required_ledger in set(required_ledgers):
if required_ledger not in available_private_keys:
self.agent_config.private_key_paths.create(
required_ledger, self._create_private_key(required_ledger)
)
if required_ledger not in available_connection_private_keys:
self.agent_config.connection_private_key_paths.create(
required_ledger,
self._create_private_key(required_ledger, is_connection=True),
)
@property
def builder(self) -> AEABuilder:
"""Get builder instance."""
builder = self._get_builder(self.agent_config, self.project.path)
builder.set_name(self.agent_name)
builder.set_runtime_mode("threaded")
builder.set_data_dir(self._data_dir)
return builder
@property
def agent_config(self) -> AgentConfig:
"""Get agent config."""
return self._agent_config
def _create_private_key(
self, ledger: str, replace: bool = False, is_connection: bool = False,
) -> str:
"""
Create new key for agent alias in working dir keys dir.
If file exists, check `replace` option.
:param ledger: the ledger id
:param replace: whether or not to replace an existing key
:param is_connection: whether or not it is a connection key
"""
file_name = (
f"{ledger}_connection_private.key"
if is_connection
else f"{ledger}_private.key"
)
filepath = os.path.join(self._data_dir, file_name)
if os.path.exists(filepath) and not replace:
return filepath
create_private_key(ledger, filepath, password=self._password)
return filepath
def remove_from_project(self) -> None:
"""Remove agent alias from project."""
self.project.agents.remove(self.agent_name)
@property
def dict(self) -> Dict[str, Any]:
"""Convert AgentAlias to dict."""
return {
"public_id": str(self.project.public_id),
"agent_name": self.agent_name,
"config": self.config_json,
}
@property
def config_json(self) -> List[Dict]:
"""Get agent config json data."""
json_data = self.agent_config.ordered_json
        result: List[Dict] = [json_data] + json_data.pop("component_configurations", [])
return result
def get_aea_instance(self) -> AEA:
"""Build new aea instance."""
self.issue_certificates()
aea = self.builder.build(password=self._password)
# override build dir to project's one
aea.DEFAULT_BUILD_DIR_NAME = os.path.join(
self.project.path, aea.DEFAULT_BUILD_DIR_NAME
)
return aea
def issue_certificates(self) -> None:
"""Issue the certificates for this agent."""
issue_certificates_(
self.project.path,
self.agent_config_manager,
path_prefix=self._data_dir,
password=self._password,
)
def set_overrides(
self,
agent_overrides: Optional[Dict] = None,
component_overrides: Optional[List[Dict]] = None,
) -> None:
"""Set override for this agent alias's config."""
overrides = deepcopy(agent_overrides or {})
component_configurations: Dict[ComponentId, Dict] = {}
for component_override in deepcopy(component_overrides or []):
try:
component_id = ComponentId.from_json(
{"version": "any", **component_override}
)
component_override.pop("author")
component_override.pop("name")
component_override.pop("type")
component_override.pop("version")
component_configurations[component_id] = component_override
except (ValueError, KeyError) as e: # pragma: nocover
raise ValueError(
f"Component overrides are incorrect: {e} during process: {component_override}"
)
overrides["component_configurations"] = component_configurations
return self.agent_config_manager.update_config(overrides)
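    # Illustrative sketch (field names/values below are hypothetical): each
    # component override is expected to carry its identifier fields, which are
    # popped above, alongside the actual configuration values, e.g.
    #   {"author": "some_author", "name": "some_connection",
    #    "type": "connection", "version": "0.1.0", "config": {"port": 8000}}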
@property
def agent_config_manager(self) -> AgentConfigManager:
"""Get agent configuration manager instance for the config."""
return AgentConfigManager(self.agent_config, self.project.path)
def get_overridables(self) -> Tuple[Dict, List[Dict]]:
"""Get all overridables for this agent alias's config."""
(
agent_overridables,
components_overridables,
) = self.agent_config_manager.get_overridables()
components_configurations = []
for component_id, obj in components_overridables.items():
if not obj: # pragma: nocover
continue
obj.update(component_id.json)
components_configurations.append(obj)
return agent_overridables, components_configurations
def get_addresses(self) -> Dict[str, str]:
"""
Get addresses from private keys.
:return: dict with crypto id str as key and address str as value
"""
wallet = get_wallet_from_agent_config(
self.agent_config, password=self._password
)
return wallet.addresses
def get_connections_addresses(self) -> Dict[str, str]:
"""
Get connections addresses from connections private keys.
:return: dict with crypto id str as key and address str as value
"""
wallet = get_wallet_from_agent_config(
self.agent_config, password=self._password
)
return wallet.connection_cryptos.addresses
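# A minimal usage sketch (not part of the library; WORKING_DIR and DATA_DIR are
# placeholders, and PublicId.from_str is assumed to be available from
# aea.configurations.base):
#
#   project = Project.load(WORKING_DIR, PublicId.from_str("author/agent:0.1.0"))
#   project.install_pypi_dependencies()
#   project.build()
#   alias = AgentAlias(project, "my_agent", DATA_DIR)
#   aea = alias.get_aea_instance()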
|
the-stack_106_23956 | # coding: utf-8
# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
def _numerical_gradient_no_batch(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
        x[idx] = tmp_val  # restore the original value
return grad
def numerical_gradient(f, X):
if X.ndim == 1:
return _numerical_gradient_no_batch(f, X)
else:
grad = np.zeros_like(X)
for idx, x in enumerate(X):
grad[idx] = _numerical_gradient_no_batch(f, x)
return grad
def function_2(x):
if x.ndim == 1:
return np.sum(x**2)
else:
return np.sum(x**2, axis=1)
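# For example, the analytic gradient of function_2 (a sum of squares) at (3, 4)
# is (6, 8), and the numerical approximation matches it closely:
#   numerical_gradient(function_2, np.array([3.0, 4.0]))  # -> approx. array([6., 8.])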
def tangent_line(f, x):
d = numerical_gradient(f, x)
print(d)
y = f(x) - d*x
return lambda t: d*t + y
if __name__ == '__main__':
x0 = np.arange(-2, 2.5, 0.25)
x1 = np.arange(-2, 2.5, 0.25)
X, Y = np.meshgrid(x0, x1)
X = X.flatten()
Y = Y.flatten()
grad = numerical_gradient(function_2, np.array([X, Y]) )
plt.figure()
plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666")#,headwidth=10,scale=40,color="#444444")
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.xlabel('x0')
plt.ylabel('x1')
plt.grid()
plt.legend()
plt.draw()
plt.show() |
the-stack_106_23957 | # -*- coding: utf-8 -*-
"""
DCGAN Tutorial
==============
**Author**: `Nathan Inkawhich <https://github.com/inkawhich>`__
"""
######################################################################
# Introduction
# ------------
#
# This tutorial will give an introduction to DCGANs through an example. We
# will train a generative adversarial network (GAN) to generate new
# celebrities after showing it pictures of many real celebrities. Most of
# the code here is from the dcgan implementation in
# `pytorch/examples <https://github.com/pytorch/examples>`__, and this
# document will give a thorough explanation of the implementation and shed
# light on how and why this model works. But don’t worry, no prior
# knowledge of GANs is required, but it may require a first-timer to spend
# some time reasoning about what is actually happening under the hood.
# Also, for the sake of time it will help to have a GPU, or two. Lets
# start from the beginning.
#
# Generative Adversarial Networks
# -------------------------------
#
# What is a GAN?
# ~~~~~~~~~~~~~~
#
# GANs are a framework for teaching a DL model to capture the training
# data’s distribution so we can generate new data from that same
# distribution. GANs were invented by Ian Goodfellow in 2014 and first
# described in the paper `Generative Adversarial
# Nets <https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf>`__.
# They are made of two distinct models, a *generator* and a
# *discriminator*. The job of the generator is to spawn ‘fake’ images that
# look like the training images. The job of the discriminator is to look
# at an image and output whether or not it is a real training image or a
# fake image from the generator. During training, the generator is
# constantly trying to outsmart the discriminator by generating better and
# better fakes, while the discriminator is working to become a better
# detective and correctly classify the real and fake images. The
# equilibrium of this game is when the generator is generating perfect
# fakes that look as if they came directly from the training data, and the
# discriminator is left to always guess at 50% confidence that the
# generator output is real or fake.
#
# Now, let’s define some notation to be used throughout the tutorial, starting
# with the discriminator. Let :math:`x` be data representing an image.
# :math:`D(x)` is the discriminator network which outputs the (scalar)
# probability that :math:`x` came from training data rather than the
# generator. Here, since we are dealing with images the input to
# :math:`D(x)` is an image of CHW size 3x64x64. Intuitively, :math:`D(x)`
# should be HIGH when :math:`x` comes from training data and LOW when
# :math:`x` comes from the generator. :math:`D(x)` can also be thought of
# as a traditional binary classifier.
#
# For the generator’s notation, let :math:`z` be a latent space vector
# sampled from a standard normal distribution. :math:`G(z)` represents the
# generator function which maps the latent vector :math:`z` to data-space.
# The goal of :math:`G` is to estimate the distribution that the training
# data comes from (:math:`p_{data}`) so it can generate fake samples from
# that estimated distribution (:math:`p_g`).
#
# So, :math:`D(G(z))` is the probability (scalar) that the output of the
# generator :math:`G` is a real image. As described in `Goodfellow’s
# paper <https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf>`__,
# :math:`D` and :math:`G` play a minimax game in which :math:`D` tries to
# maximize the probability it correctly classifies reals and fakes
# (:math:`logD(x)`), and :math:`G` tries to minimize the probability that
# :math:`D` will predict its outputs are fake (:math:`log(1-D(G(x)))`).
# From the paper, the GAN loss function is
#
# .. math:: \underset{G}{\text{min}} \underset{D}{\text{max}}V(D,G) = \mathbb{E}_{x\sim p_{data}(x)}\big[logD(x)\big] + \mathbb{E}_{z\sim p_{z}(z)}\big[log(1-D(G(z)))\big]
#
# In theory, the solution to this minimax game is where
# :math:`p_g = p_{data}`, and the discriminator guesses randomly if the
# inputs are real or fake. However, the convergence theory of GANs is
# still being actively researched and in reality models do not always
# train to this point.
#
# What is a DCGAN?
# ~~~~~~~~~~~~~~~~
#
# A DCGAN is a direct extension of the GAN described above, except that it
# explicitly uses convolutional and convolutional-transpose layers in the
# discriminator and generator, respectively. It was first described by
# Radford et. al. in the paper `Unsupervised Representation Learning With
# Deep Convolutional Generative Adversarial
# Networks <https://arxiv.org/pdf/1511.06434.pdf>`__. The discriminator
# is made up of strided
# `convolution <https://pytorch.org/docs/stable/nn.html#torch.nn.Conv2d>`__
# layers, `batch
# norm <https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm2d>`__
# layers, and
# `LeakyReLU <https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU>`__
# activations. The input is a 3x64x64 input image and the output is a
# scalar probability that the input is from the real data distribution.
# The generator is comprised of
# `convolutional-transpose <https://pytorch.org/docs/stable/nn.html#torch.nn.ConvTranspose2d>`__
# layers, batch norm layers, and
# `ReLU <https://pytorch.org/docs/stable/nn.html#relu>`__ activations. The
# input is a latent vector, :math:`z`, that is drawn from a standard
# normal distribution and the output is a 3x64x64 RGB image. The strided
# conv-transpose layers allow the latent vector to be transformed into a
# volume with the same shape as an image. In the paper, the authors also
# give some tips about how to setup the optimizers, how to calculate the
# loss functions, and how to initialize the model weights, all of which
# will be explained in the coming sections.
#
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# from IPython.display import HTML
import actnn
# Set random seed for reproducibility
manualSeed = 999
# manualSeed = random.randint(1, 10000) # use if you want new results
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
######################################################################
# Inputs
# ------
#
# Let’s define some inputs for the run:
#
# - **dataroot** - the path to the root of the dataset folder. We will
# talk more about the dataset in the next section
# - **workers** - the number of worker threads for loading the data with
# the DataLoader
# - **batch_size** - the batch size used in training. The DCGAN paper
# uses a batch size of 128
# - **image_size** - the spatial size of the images used for training.
# This implementation defaults to 64x64. If another size is desired,
# the structures of D and G must be changed. See
# `here <https://github.com/pytorch/examples/issues/70>`__ for more
# details
# - **nc** - number of color channels in the input images. For color
# images this is 3
# - **nz** - length of latent vector
# - **ngf** - relates to the depth of feature maps carried through the
# generator
# - **ndf** - sets the depth of feature maps propagated through the
# discriminator
# - **num_epochs** - number of training epochs to run. Training for
# longer will probably lead to better results but will also take much
# longer
# - **lr** - learning rate for training. As described in the DCGAN paper,
# this number should be 0.0002
# - **beta1** - beta1 hyperparameter for Adam optimizers. As described in
# paper, this number should be 0.5
# - **ngpu** - number of GPUs available. If this is 0, code will run in
# CPU mode. If this number is greater than 0 it will run on that number
# of GPUs
#
# Root directory for dataset
dataroot = "/data/jianfei/celeba"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 128
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 64
# Number of channels in the training images. For color images this is 3
nc = 3
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 64
# Size of feature maps in discriminator
ndf = 64
# Number of training epochs
num_epochs = 5
# Learning rate for optimizers
lr = 0.0002
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1
######################################################################
# Data
# ----
#
# In this tutorial we will use the `Celeb-A Faces
# dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`__ which can
# be downloaded at the linked site, or in `Google
# Drive <https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZzg>`__.
# The dataset will download as a file named *img_align_celeba.zip*. Once
# downloaded, create a directory named *celeba* and extract the zip file
# into that directory. Then, set the *dataroot* input for this notebook to
# the *celeba* directory you just created. The resulting directory
# structure should be:
#
# ::
#
# /path/to/celeba
# -> img_align_celeba
# -> 188242.jpg
# -> 173822.jpg
# -> 284702.jpg
# -> 537394.jpg
# ...
#
# This is an important step because we will be using the ImageFolder
# dataset class, which requires there to be subdirectories in the
# dataset’s root folder. Now, we can create the dataset, create the
# dataloader, set the device to run on, and finally visualize some of the
# training data.
#
# We can use an image folder dataset the way we have it setup.
# Create the dataset
dataset = dset.ImageFolder(root=dataroot,
transform=transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
# Create the dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=workers)
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(), (1, 2, 0)))
######################################################################
# Implementation
# --------------
#
# With our input parameters set and the dataset prepared, we can now get
# into the implementation. We will start with the weight initialization
# strategy, then talk about the generator, discriminator, loss functions,
# and training loop in detail.
#
# Weight Initialization
# ~~~~~~~~~~~~~~~~~~~~~
#
# From the DCGAN paper, the authors specify that all model weights shall
# be randomly initialized from a Normal distribution with mean=0,
# stdev=0.02. The ``weights_init`` function takes an initialized model as
# input and reinitializes all convolutional, convolutional-transpose, and
# batch normalization layers to meet this criteria. This function is
# applied to the models immediately after initialization.
#
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
######################################################################
# Generator
# ~~~~~~~~~
#
# The generator, :math:`G`, is designed to map the latent space vector
# (:math:`z`) to data-space. Since our data are images, converting
# :math:`z` to data-space means ultimately creating a RGB image with the
# same size as the training images (i.e. 3x64x64). In practice, this is
# accomplished through a series of strided two dimensional convolutional
# transpose layers, each paired with a 2d batch norm layer and a relu
# activation. The output of the generator is fed through a tanh function
# to return it to the input data range of :math:`[-1,1]`. It is worth
# noting the existence of the batch norm functions after the
# conv-transpose layers, as this is a critical contribution of the DCGAN
# paper. These layers help with the flow of gradients during training. An
# image of the generator from the DCGAN paper is shown below.
#
# .. figure:: /_static/img/dcgan_generator.png
# :alt: dcgan_generator
#
# Notice how the inputs we set in the input section (*nz*, *ngf*, and
# *nc*) influence the generator architecture in code. *nz* is the length
# of the z input vector, *ngf* relates to the size of the feature maps
# that are propagated through the generator, and *nc* is the number of
# channels in the output image (set to 3 for RGB images). Below is the
# code for the generator.
#
# Generator Code
class Generator(nn.Module):
def __init__(self, ngpu):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
######################################################################
# Now, we can instantiate the generator and apply the ``weights_init``
# function. Check out the printed model to see how the generator object is
# structured.
#
# Create the generator
netG = Generator(ngpu)
netG = actnn.QModule(netG)
netG = netG.to(device)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
netG = nn.DataParallel(netG, list(range(ngpu)))
# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.2.
netG.apply(weights_init)
# Print the model
print(netG)
######################################################################
# Discriminator
# ~~~~~~~~~~~~~
#
# As mentioned, the discriminator, :math:`D`, is a binary classification
# network that takes an image as input and outputs a scalar probability
# that the input image is real (as opposed to fake). Here, :math:`D` takes
# a 3x64x64 input image, processes it through a series of Conv2d,
# BatchNorm2d, and LeakyReLU layers, and outputs the final probability
# through a Sigmoid activation function. This architecture can be extended
# with more layers if necessary for the problem, but there is significance
# to the use of the strided convolution, BatchNorm, and LeakyReLUs. The
# DCGAN paper mentions it is a good practice to use strided convolution
# rather than pooling to downsample because it lets the network learn its
# own pooling function. Also batch norm and leaky relu functions promote
# healthy gradient flow which is critical for the learning process of both
# :math:`G` and :math:`D`.
#
#########################################################################
# Discriminator Code
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
######################################################################
# Now, as with the generator, we can create the discriminator, apply the
# ``weights_init`` function, and print the model’s structure.
#
# Create the Discriminator
netD = Discriminator(ngpu)
netD = actnn.QModule(netD)
netD = netD.to(device)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
netD = nn.DataParallel(netD, list(range(ngpu)))
# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.02.
netD.apply(weights_init)
# Print the model
print(netD)
######################################################################
# Loss Functions and Optimizers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# With :math:`D` and :math:`G` setup, we can specify how they learn
# through the loss functions and optimizers. We will use the Binary Cross
# Entropy loss
# (`BCELoss <https://pytorch.org/docs/stable/nn.html#torch.nn.BCELoss>`__)
# function which is defined in PyTorch as:
#
# .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
#
# Notice how this function provides the calculation of both log components
# in the objective function (i.e. :math:`log(D(x))` and
# :math:`log(1-D(G(z)))`). We can specify what part of the BCE equation to
# use with the :math:`y` input. This is accomplished in the training loop
# which is coming up soon, but it is important to understand how we can
# choose which component we wish to calculate just by changing :math:`y`
# (i.e. GT labels).
#
# Next, we define our real label as 1 and the fake label as 0. These
# labels will be used when calculating the losses of :math:`D` and
# :math:`G`, and this is also the convention used in the original GAN
# paper. Finally, we set up two separate optimizers, one for :math:`D` and
# one for :math:`G`. As specified in the DCGAN paper, both are Adam
# optimizers with learning rate 0.0002 and Beta1 = 0.5. For keeping track
# of the generator’s learning progression, we will generate a fixed batch
# of latent vectors that are drawn from a Gaussian distribution
# (i.e. fixed_noise) . In the training loop, we will periodically input
# this fixed_noise into :math:`G`, and over the iterations we will see
# images form out of the noise.
#
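# As a quick illustration of that label trick (not part of the original
# tutorial): with targets of 1 the BCE term reduces to -log(x), and with
# targets of 0 it reduces to -log(1-x), e.g.
#   _bce = nn.BCELoss()
#   _bce(torch.tensor([0.9]), torch.ones(1))   # ~0.105 = -log(0.9)
#   _bce(torch.tensor([0.9]), torch.zeros(1))  # ~2.303 = -log(0.1)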
# Initialize BCELoss function
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
real_label = 1.
fake_label = 0.
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
######################################################################
# Training
# ~~~~~~~~
#
# Finally, now that we have all of the parts of the GAN framework defined,
# we can train it. Be mindful that training GANs is somewhat of an art
# form, as incorrect hyperparameter settings lead to mode collapse with
# little explanation of what went wrong. Here, we will closely follow
# Algorithm 1 from Goodfellow’s paper, while abiding by some of the best
# practices shown in `ganhacks <https://github.com/soumith/ganhacks>`__.
# Namely, we will “construct different mini-batches for real and fake”
# images, and also adjust G’s objective function to maximize
# :math:`logD(G(z))`. Training is split up into two main parts. Part 1
# updates the Discriminator and Part 2 updates the Generator.
#
# **Part 1 - Train the Discriminator**
#
# Recall, the goal of training the discriminator is to maximize the
# probability of correctly classifying a given input as real or fake. In
# terms of Goodfellow, we wish to “update the discriminator by ascending
# its stochastic gradient”. Practically, we want to maximize
# :math:`log(D(x)) + log(1-D(G(z)))`. Due to the separate mini-batch
# suggestion from ganhacks, we will calculate this in two steps. First, we
# will construct a batch of real samples from the training set, forward
# pass through :math:`D`, calculate the loss (:math:`log(D(x))`), then
# calculate the gradients in a backward pass. Secondly, we will construct
# a batch of fake samples with the current generator, forward pass this
# batch through :math:`D`, calculate the loss (:math:`log(1-D(G(z)))`),
# and *accumulate* the gradients with a backward pass. Now, with the
# gradients accumulated from both the all-real and all-fake batches, we
# call a step of the Discriminator’s optimizer.
#
# **Part 2 - Train the Generator**
#
# As stated in the original paper, we want to train the Generator by
# minimizing :math:`log(1-D(G(z)))` in an effort to generate better fakes.
# As mentioned, this was shown by Goodfellow to not provide sufficient
# gradients, especially early in the learning process. As a fix, we
# instead wish to maximize :math:`log(D(G(z)))`. In the code we accomplish
# this by: classifying the Generator output from Part 1 with the
# Discriminator, computing G’s loss *using real labels as GT*, computing
# G’s gradients in a backward pass, and finally updating G’s parameters
# with an optimizer step. It may seem counter-intuitive to use the real
# labels as GT labels for the loss function, but this allows us to use the
# :math:`log(x)` part of the BCELoss (rather than the :math:`log(1-x)`
# part) which is exactly what we want.
#
# Finally, we will do some statistic reporting and at the end of each
# epoch we will push our fixed_noise batch through the generator to
# visually track the progress of G’s training. The training statistics
# reported are:
#
# - **Loss_D** - discriminator loss calculated as the sum of losses for
#    the all real and all fake batches (:math:`log(D(x)) + log(1 - D(G(z)))`).
# - **Loss_G** - generator loss calculated as :math:`log(D(G(z)))`
# - **D(x)** - the average output (across the batch) of the discriminator
# for the all real batch. This should start close to 1 then
# theoretically converge to 0.5 when G gets better. Think about why
# this is.
# - **D(G(z))** - average discriminator outputs for the all fake batch.
# The first number is before D is updated and the second number is
# after D is updated. These numbers should start near 0 and converge to
# 0.5 as G gets better. Think about why this is.
#
# **Note:** This step might take a while, depending on how many epochs you
# run and if you removed some data from the dataset.
#
# Training Loop
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):
# For each batch in the dataloader
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.fill_(fake_label)
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
errG.backward()
D_G_z2 = output.mean().item()
# Update G
optimizerG.step()
# Output training stats
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, num_epochs, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# Save Losses for plotting later
G_losses.append(errG.item())
D_losses.append(errD.item())
# Check how the generator is doing by saving G's output on fixed_noise
if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(dataloader) - 1)):
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
######################################################################
# Results
# -------
#
# Finally, let’s check out how we did. Here, we will look at three
# different results. First, we will see how D and G’s losses changed
# during training. Second, we will visualize G’s output on the fixed_noise
# batch for every epoch. And third, we will look at a batch of real data
# next to a batch of fake data from G.
#
# **Loss versus training iteration**
#
# Below is a plot of D & G’s losses versus training iterations.
#
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses, label="G")
plt.plot(D_losses, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
plt.savefig('loss.png')
######################################################################
# **Visualization of G’s progression**
#
# Remember how we saved the generator’s output on the fixed_noise batch
# after every epoch of training. Now, we can visualize the training
# progression of G with an animation. Press the play button to start the
# animation.
#
# %%capture
# fig = plt.figure(figsize=(8, 8))
# plt.axis("off")
# ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list]
# ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
# HTML(ani.to_jshtml())
######################################################################
# **Real Images vs. Fake Images**
#
# Finally, let’s take a look at some real images and fake images side by
# side.
#
# Grab a batch of real images from the dataloader
real_batch = next(iter(dataloader))
# Plot the real images
plt.figure(figsize=(15, 15))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(), (1, 2, 0)))
plt.savefig('real_images.png')
# Plot the fake images from the last epoch
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1], (1, 2, 0)))
plt.show()
plt.savefig('fake_images.png')
######################################################################
# Where to Go Next
# ----------------
#
# We have reached the end of our journey, but there are several places you
# could go from here. You could:
#
# - Train for longer to see how good the results get
# - Modify this model to take a different dataset and possibly change the
# size of the images and the model architecture
# - Check out some other cool GAN projects
# `here <https://github.com/nashory/gans-awesome-applications>`__
# - Create GANs that generate
# `music <https://deepmind.com/blog/wavenet-generative-model-raw-audio/>`__
#
|
the-stack_106_23958 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_kronecker as kronecker
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _kronecker_dense(factors):
"""Convert a list of factors, into a dense Kronecker product."""
product = factors[0]
for factor in factors[1:]:
product = product[..., array_ops.newaxis, :, array_ops.newaxis]
factor_to_mul = factor[..., array_ops.newaxis, :, array_ops.newaxis, :]
product *= factor_to_mul
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
return product
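# In block form, for 2x2 factors this is the standard Kronecker product of A
# and B, i.e. [[a11*B, a12*B], [a21*B, a22*B]]; KroneckerDenseTest below checks
# this against explicitly written-out products.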
class KroneckerDenseTest(test.TestCase):
"""Test of `_kronecker_dense` function."""
def test_kronecker_dense_matrix(self):
x = ops.convert_to_tensor([[2., 3.], [1., 2.]], dtype=dtypes.float32)
y = ops.convert_to_tensor([[1., 2.], [5., -1.]], dtype=dtypes.float32)
# From explicitly writing out the kronecker product of x and y.
z = ops.convert_to_tensor([
[2., 4., 3., 6.],
[10., -2., 15., -3.],
[1., 2., 2., 4.],
[5., -1., 10., -2.]], dtype=dtypes.float32)
# From explicitly writing out the kronecker product of y and x.
w = ops.convert_to_tensor([
[2., 3., 4., 6.],
[1., 2., 2., 4.],
[10., 15., -2., -3.],
[5., 10., -1., -2.]], dtype=dtypes.float32)
self.assertAllClose(
self.evaluate(_kronecker_dense([x, y])), self.evaluate(z))
self.assertAllClose(
self.evaluate(_kronecker_dense([y, x])), self.evaluate(w))
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorKroneckerTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((1, 1), factors=[(1, 1), (1, 1)]),
shape_info((8, 8), factors=[(2, 2), (2, 2), (2, 2)]),
shape_info((12, 12), factors=[(2, 2), (3, 3), (2, 2)]),
shape_info((1, 3, 3), factors=[(1, 1), (1, 3, 3)]),
shape_info((3, 6, 6), factors=[(3, 1, 1), (1, 2, 2), (1, 3, 3)]),
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Kronecker products constructed below will be from symmetric
# positive-definite matrices.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
expected_factors = build_info.__dict__["factors"]
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True)
for block_shape in expected_factors
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(m, shape=None) for m in matrices]
operator = kronecker.LinearOperatorKronecker(
[linalg.LinearOperatorFullMatrix(
l,
is_square=True,
is_self_adjoint=True,
is_positive_definite=True)
for l in lin_op_matrices])
matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)
kronecker_dense = _kronecker_dense(matrices)
if not use_placeholder:
kronecker_dense.set_shape(shape)
return operator, kronecker_dense
def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1 and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = kronecker.LinearOperatorKronecker(
[linalg.LinearOperatorFullMatrix(matrix),
linalg.LinearOperatorFullMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
# The matrix values do not effect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = kronecker.LinearOperatorKronecker(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
kronecker.LinearOperatorKronecker(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")
operator = kronecker.LinearOperatorKronecker([operator_1, operator_2])
self.assertEqual("left_x_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
kronecker.LinearOperatorKronecker(operators)
def test_empty_or_one_operators_raises(self):
with self.assertRaisesRegexp(ValueError, ">=1 operators"):
kronecker.LinearOperatorKronecker([])
def test_kronecker_adjoint_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
],
is_non_singular=True,
)
adjoint = operator.adjoint()
self.assertIsInstance(
adjoint,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(adjoint.operators))
def test_kronecker_cholesky_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
],
is_positive_definite=True,
is_self_adjoint=True,
)
cholesky_factor = operator.cholesky()
self.assertIsInstance(
cholesky_factor,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(cholesky_factor.operators))
self.assertIsInstance(
cholesky_factor.operators[0],
lower_triangular.LinearOperatorLowerTriangular)
self.assertIsInstance(
cholesky_factor.operators[1],
lower_triangular.LinearOperatorLowerTriangular)
def test_kronecker_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(inverse.operators))
def test_tape_safe(self):
matrix_1 = variables_module.Variable([[1., 0.], [0., 1.]])
matrix_2 = variables_module.Variable([[2., 0.], [0., 2.]])
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix_1, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix_2, is_non_singular=True),
],
is_non_singular=True,
)
self.check_tape_safe(operator)
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorKroneckerTest)
test.main()
|
the-stack_106_23959 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from typing import *
import numpy as np
import pickle
import time
import rospy
import math
from geometry_msgs.msg import PoseStamped
from nics_ros_host.msg import global_state
from nics_ros_host.msg import vehicle_state
from nics_ros_host.msg import human_cmd
from nics_ros_host.srv import *
import struct
import threading
import argparse
from MultiVehicleEnv.environment import MultiVehicleEnv
import MultiVehicleEnv.scenarios as scenarios
#parser = argparse.ArgumentParser(description="GUI for Multi-VehicleEnv")
#parser.add_argument('--guiport',type=str,default='/dev/shm/gui_port')
#parser.add_argument('--usegui', action='store_true', default=False)
#parser.add_argument('--step-t',type=float,default=1.0)
#parser.add_argument('--sim-step',type=int,default=100)
#parser.add_argument('--direction_alpha', type=float, default=1.0)
#parser.add_argument('--add_direction_encoder',type=str, default='')
#args = parser.parse_args()
class RosWorld(object):
def __init__(self,scenario_name, args):
RosScenario = scenarios.load(scenario_name + ".py").Scenario()
self.world = RosScenario.make_world(args)
self.env = MultiVehicleEnv(self.world, RosScenario.reset_world, RosScenario.reward, RosScenario.observation,RosScenario.info)
agent_num = len(self.world.vehicles)
rospy.set_param('/All_Ready',0)
rospy.set_param('/car_id_list',[])
while True:
car_id_list = rospy.get_param('/car_id_list')
for i in range(agent_num):
if i not in car_id_list:
print("Car %i not ready. Plz Check again.",i)
break
if len(car_id_list) == agent_num:
print("All Cars ready. Let's go.")
rospy.set_param('/All_Ready',1)
break
time.sleep(0.5)
rospy.init_node("RosWorld")
pub = rospy.Publisher('global_state', global_state , queue_size=10)
#service list
command_service_list = []
####### spin Parts #########
obs_messenger = rospy.Service('get_obs',obs,self.obs_calculate)
cmd_sub = rospy.Subscriber('human_cmd', human_cmd, self.cmd_callback)
for i in range(agent_num):
command_service_list.append(rospy.ServiceProxy("supervisor_c"+str(car_id_list[i]),sup))
rospy.Subscriber('/vrpn_client_node/c' + str(i+1) + '/pose', PoseStamped, self.pose_callback, (i, pub, command_service_list))
rospy.spin()
def pose_callback(self, msg, arg):
car_id = arg[0]
pub = arg[1]
global command_list
command_list = arg[2]
vehicle_list = self.world.vehicles
agent_num = len(vehicle_list)
seq = msg.header.seq
stamp = msg.header.stamp
x = msg.pose.position.x
y = msg.pose.position.y
z = msg.pose.position.z
qx = msg.pose.orientation.x
qy = msg.pose.orientation.y
qz = msg.pose.orientation.z
qw = msg.pose.orientation.w
roll = math.atan2(2*(qw*qx+qy*qz),1-2*(qx*qx+qy*qy))
pitch = math.asin(2*(qw*qy-qz*qx))
yaw = math.atan2(2*(qw*qz+qx*qy),1-2*(qz*qz+qy*qy))
#rospy.loginfo("yaw:%f,pitch:%f,roll:%f"%(yaw,pitch,roll))
#if(yaw > math.pi):
# yaw = yaw - 2 * math.pi
#elif(yaw <= -math.pi):
# yaw = yaw + 2 * math.pi
vehicle_list[car_id].state.coordinate = [x, z]
vehicle_list[car_id].state.theta = pitch
self.world._check_collision()
for i in range(agent_num):
#print("car",i)
#print("movable",world.vehicles[i].state.movable)
#print("crashed",world.vehicles[i].state.crashed)
if self.world.vehicles[i].state.movable == False or self.world.vehicles[i].state.crashed == True:
result = command_list[i].call(i,self.world.vehicles[i].state.movable,self.world.vehicles[i].state.crashed)
pub.publish(global_state([vehicle_state(
self.world.vehicles[i].state.coordinate,
self.world.vehicles[i].state.theta,
self.world.vehicles[i].state.vel_b,
self.world.vehicles[i].state.phi,
self.world.vehicles[i].state.ctrl_vel_b,
self.world.vehicles[i].state.ctrl_phi,
self.world.vehicles[i].state.movable,
self.world.vehicles[i].state.crashed
) for i in range(agent_num)]))
# maybe put world.dataslot in the topic
#rospy.loginfo("RigidBody0%d[coordinate]: Header seq = %d; Position x = %f, y = %f, z = %f; Orientation roll = %f, pitch = %f, yaw = %f.", car_id+1, seq, x, y, z, roll, pitch, yaw)
#if count % 5 == 0 and count < 2000:
def obs_calculate(self,req):
car_id = req.car_id
self.world.dumpGUI()
rospy.loginfo("Calculate obs for car %i",car_id)
return obsResponse([2],[2],[2])
def cmd_callback(self,msg):
#add how to deal with dataslot
rospy.loginfo("Cmdline get ", msg.cmd)
class rosarg(object):
pass
if __name__ == '__main__':
args = rosarg()
args.guiport='/dev/shm/gui_port'
args.usegui=False
args.step_t=1.0
args.sim_step=100
args.direction_alpha=1.0
args.add_direction_encoder=''
ros_env = RosWorld('3p2t2f',args) |
the-stack_106_23961 | """
Python 3 Object-Oriented Programming
Chapter 12. Advanced Python Design Patterns
"""
from __future__ import annotations
import contextlib
import csv
from pathlib import Path
import sqlite3
from typing import ContextManager, TextIO, cast, Optional
import sys
def test_setup(db_name: str = "sales.db") -> sqlite3.Connection:
conn = sqlite3.connect(db_name)
conn.execute(
"""
CREATE TABLE IF NOT EXISTS Sales (
salesperson text,
amt currency,
year integer,
model text,
new boolean
)
"""
)
conn.execute(
"""
DELETE FROM Sales
"""
)
conn.execute(
"""
INSERT INTO Sales
VALUES('Tim', 16000, 2010, 'Honda Fit', 'true')
"""
)
conn.execute(
"""
INSERT INTO Sales
VALUES('Tim', 9000, 2006, 'Ford Focus', 'false')
"""
)
conn.execute(
"""
INSERT INTO Sales
VALUES('Hannah', 8000, 2004, 'Dodge Neon', 'false')
"""
)
conn.execute(
"""
INSERT INTO Sales
VALUES('Hannah', 28000, 2009, 'Ford Mustang', 'true')
"""
)
conn.execute(
"""
INSERT INTO Sales
VALUES('Hannah', 50000, 2010, 'Lincoln Navigator', 'true')
"""
)
conn.execute(
"""
INSERT INTO Sales
VALUES('Jason', 20000, 2008, 'Toyota Prius', 'false')
"""
)
conn.commit()
return conn
class QueryTemplate:
def __init__(self, db_name: str = "sales.db") -> None:
self.db_name = db_name
self.conn: sqlite3.Connection
self.results: list[tuple[str, ...]]
self.query: str
self.header: list[str]
def connect(self) -> None:
self.conn = sqlite3.connect(self.db_name)
def construct_query(self) -> None:
raise NotImplementedError("construct_query not implemented")
def do_query(self) -> None:
results = self.conn.execute(self.query)
self.results = results.fetchall()
def output_context(self) -> ContextManager[TextIO]:
self.target_file = sys.stdout
return cast(ContextManager[TextIO], contextlib.nullcontext())
def output_results(self) -> None:
writer = csv.writer(self.target_file)
writer.writerow(self.header)
writer.writerows(self.results)
def process_format(self) -> None:
self.connect()
self.construct_query()
self.do_query()
with self.output_context():
self.output_results()
import datetime
class NewVehiclesQuery(QueryTemplate):
def construct_query(self) -> None:
self.query = """
SELECT * FROM Sales WHERE new='true'
"""
self.header = ["salesperson", "amt", "year", "model", "new"]
class SalesGrossQuery(QueryTemplate):
def construct_query(self) -> None:
self.query = """
SELECT salesperson, sum(amt) FROM Sales GROUP BY salesperson
"""
self.header = ["salesperson", "total sales"]
def output_context(self) -> ContextManager[TextIO]:
today = datetime.date.today()
filepath = Path(f"gross_sales_{today:%Y%m%d}.csv")
self.target_file = filepath.open("w")
return self.target_file
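# Hedged illustrative sketch (not in the original chapter code): one more subclass to
# show the Template Method hook points; only construct_query() needs overriding here,
# while connect/do_query/output steps are inherited from QueryTemplate.
class SalesByYearQuery(QueryTemplate):
    def construct_query(self) -> None:
        self.query = """
            SELECT year, count(*) FROM Sales GROUP BY year
        """
        self.header = ["year", "number of sales"]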
def main() -> None:
test_setup()
task_1 = NewVehiclesQuery()
task_1.process_format()
task_2 = SalesGrossQuery()
task_2.process_format()
if __name__ == "__main__":
main()
|
the-stack_106_23962 | import yaconfig
metaconfig = yaconfig.MetaConfig(
yaconfig.Variable("db", type=str, default="local.db", help="Database to use"),
yaconfig.Variable("secret", type=bytes, default='_5#y2L"F4Q8z\n\xec]/', help="Flask secret"),
    # Note: default values must be str; they are encoded to bytes using UTF-8
yaconfig.Variable("text", type=str, default="Ĉu vi komprenas tion?", help="Text to output")
)
# Get a default configuration, which should be overridden when the execution starts
config = yaconfig.Config(metaconfig)
|
the-stack_106_23963 | from flask import request
from app.api.responses import Responses
parties = []
class PoliticalParty:
"""this initializes political party class methods"""
def __init__(self, name, hqAddress, logoUrl):
self.party_id = len(parties) + 1
self.name = name
self.hqAddress = hqAddress
self.logoUrl = logoUrl
@staticmethod
def get_all_parties():
return Responses.complete_response(parties)
def add_political_party(self):
"""this saves political party data"""
new_party = {
"id": len(parties) + 1,
"name": self.name,
"hqAddress": self.hqAddress,
"logoUrl": self.logoUrl
}
parties.append(new_party)
class Update:
@staticmethod
def update_party_details(id):
task = [party for party in parties if party["id"] == id]
if not task:
return Responses.not_found("Party not found"), 404
task[0]['name'] = request.json.get('name', task[0]['name'])
return Responses.complete_response(task)
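# Hedged usage sketch (not part of the original module): how a Flask view function
# might call into the classes above. The view name is hypothetical and it assumes an
# active request context with a JSON body.
def create_party_view():
    """Create a party from the JSON body and return the full party list."""
    data = request.get_json() or {}
    party = PoliticalParty(data.get("name"), data.get("hqAddress"), data.get("logoUrl"))
    party.add_political_party()
    return PoliticalParty.get_all_parties()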
|
the-stack_106_23965 |
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.float()
if l.bias is not None:
l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
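# Hedged illustrative sketch (not in the original module): get_param_groups_and_shapes
# splits parameters into two groups, scalars/vectors (ndim <= 1, flattened to shape -1)
# and matrices/kernels (ndim > 1, flattened to shape (1, -1)), which make_master_params
# then turns into full-precision master tensors.
def _example_param_grouping():
    model = nn.Linear(4, 2)  # one 2x4 weight matrix and one bias vector of length 2
    groups = get_param_groups_and_shapes(model.named_parameters())
    # expected result: [(['bias'], -1), (['weight'], (1, -1))]
    return [([name for name, _ in group], shape) for group, shape in groups]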
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2 ** self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
|
the-stack_106_23968 | # borg cli interface / toplevel archiver code
import sys
import traceback
try:
import argparse
import collections
import configparser
import faulthandler
import functools
import hashlib
import inspect
import itertools
import json
import logging
import os
import re
import shlex
import shutil
import signal
import stat
import subprocess
import tarfile
import textwrap
import time
from binascii import unhexlify
from contextlib import contextmanager
from datetime import datetime, timedelta
from .logger import create_logger, setup_logging
logger = create_logger()
import borg
from . import __version__
from . import helpers
from .algorithms.checksums import crc32
from .archive import Archive, ArchiveChecker, ArchiveRecreater, Statistics, is_special
from .archive import BackupError, BackupOSError, backup_io, OsOpen, stat_update_check
from .archive import FilesystemObjectProcessors, MetadataCollector, ChunksProcessor
from .cache import Cache, assert_secure, SecurityManager
from .constants import * # NOQA
from .compress import CompressionSpec
from .crypto.key import key_creator, key_argument_names, tam_required_file, tam_required, RepoKey, PassphraseKey
from .crypto.keymanager import KeyManager
from .helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from .helpers import Error, NoManifestError, set_ec
from .helpers import positive_int_validator, location_validator, archivename_validator, ChunkerParams, Location
from .helpers import PrefixSpec, GlobSpec, CommentSpec, SortBySpec, FilesCacheMode
from .helpers import BaseFormatter, ItemFormatter, ArchiveFormatter
from .helpers import format_timedelta, format_file_size, parse_file_size, format_archive
from .helpers import safe_encode, remove_surrogates, bin_to_hex, prepare_dump_dict
from .helpers import interval, prune_within, prune_split, PRUNING_PATTERNS
from .helpers import timestamp
from .helpers import get_cache_dir, os_stat
from .helpers import Manifest, AI_HUMAN_SORT_KEYS
from .helpers import hardlinkable
from .helpers import StableDict
from .helpers import check_python, check_extension_modules
from .helpers import dir_is_tagged, is_slow_msgpack, is_supported_msgpack, yes, sysinfo
from .helpers import log_multi
from .helpers import signal_handler, raising_signal_handler, SigHup, SigTerm
from .helpers import ErrorIgnoringTextIOWrapper
from .helpers import ProgressIndicatorPercent
from .helpers import basic_json_data, json_print
from .helpers import replace_placeholders
from .helpers import ChunkIteratorFileWrapper
from .helpers import popen_with_error_handling, prepare_subprocess_env
from .helpers import dash_open
from .helpers import umount
from .helpers import flags_root, flags_dir, flags_special_follow, flags_special
from .helpers import msgpack
from .helpers import sig_int
from .nanorst import rst_to_terminal
from .patterns import ArgparsePatternAction, ArgparseExcludeFileAction, ArgparsePatternFileAction, parse_exclude_pattern
from .patterns import PatternMatcher
from .item import Item
from .platform import get_flags, get_process_id, SyncFile
from .remote import RepositoryServer, RemoteRepository, cache_if_remote
from .repository import Repository, LIST_SCAN_LIMIT, TAG_PUT, TAG_DELETE, TAG_COMMIT
from .selftest import selftest
from .upgrader import AtticRepositoryUpgrader, BorgRepositoryUpgrader
except BaseException:
# an unhandled exception in the try-block would cause the borg cli command to exit with rc 1 due to python's
# default behavior, see issue #4424.
# as borg defines rc 1 as WARNING, this would be a mismatch, because a crash should be an ERROR (rc 2).
traceback.print_exc()
sys.exit(2) # == EXIT_ERROR
assert EXIT_ERROR == 2, "EXIT_ERROR is not 2, as expected - fix assert AND exception handler right above this line."
STATS_HEADER = " Original size Compressed size Deduplicated size"
PURE_PYTHON_MSGPACK_WARNING = "Using a pure-python msgpack! This will result in lower performance."
def argument(args, str_or_bool):
"""If bool is passed, return it. If str is passed, retrieve named attribute from args."""
if isinstance(str_or_bool, str):
return getattr(args, str_or_bool)
if isinstance(str_or_bool, (list, tuple)):
return any(getattr(args, item) for item in str_or_bool)
return str_or_bool
def with_repository(fake=False, invert_fake=False, create=False, lock=True,
exclusive=False, manifest=True, cache=False, secure=True,
compatibility=None):
"""
Method decorator for subcommand-handling methods: do_XYZ(self, args, repository, …)
If a parameter (where allowed) is a str the attribute named of args is used instead.
:param fake: (str or bool) use None instead of repository, don't do anything else
:param create: create repository
:param lock: lock repository
:param exclusive: (str or bool) lock repository exclusively (for writing)
:param manifest: load manifest and key, pass them as keyword arguments
:param cache: open cache, pass it as keyword argument (implies manifest)
:param secure: do assert_secure after loading manifest
:param compatibility: mandatory if not create and (manifest or cache), specifies mandatory feature categories to check
"""
if not create and (manifest or cache):
if compatibility is None:
raise AssertionError("with_repository decorator used without compatibility argument")
if type(compatibility) is not tuple:
raise AssertionError("with_repository decorator compatibility argument must be of type tuple")
else:
if compatibility is not None:
raise AssertionError("with_repository called with compatibility argument but would not check" + repr(compatibility))
if create:
compatibility = Manifest.NO_OPERATION_CHECK
def decorator(method):
@functools.wraps(method)
def wrapper(self, args, **kwargs):
location = args.location # note: 'location' must be always present in args
append_only = getattr(args, 'append_only', False)
storage_quota = getattr(args, 'storage_quota', None)
make_parent_dirs = getattr(args, 'make_parent_dirs', False)
if argument(args, fake) ^ invert_fake:
return method(self, args, repository=None, **kwargs)
elif location.proto == 'ssh':
repository = RemoteRepository(location, create=create, exclusive=argument(args, exclusive),
lock_wait=self.lock_wait, lock=lock, append_only=append_only,
make_parent_dirs=make_parent_dirs, args=args)
else:
repository = Repository(location.path, create=create, exclusive=argument(args, exclusive),
lock_wait=self.lock_wait, lock=lock, append_only=append_only,
storage_quota=storage_quota, make_parent_dirs=make_parent_dirs)
with repository:
if manifest or cache:
kwargs['manifest'], kwargs['key'] = Manifest.load(repository, compatibility)
if 'compression' in args:
kwargs['key'].compressor = args.compression.compressor
if secure:
assert_secure(repository, kwargs['manifest'], self.lock_wait)
if cache:
with Cache(repository, kwargs['key'], kwargs['manifest'],
progress=getattr(args, 'progress', False), lock_wait=self.lock_wait,
cache_mode=getattr(args, 'files_cache_mode', DEFAULT_FILES_CACHE_MODE),
consider_part_files=getattr(args, 'consider_part_files', False)) as cache_:
return method(self, args, repository=repository, cache=cache_, **kwargs)
else:
return method(self, args, repository=repository, **kwargs)
return wrapper
return decorator
def with_archive(method):
@functools.wraps(method)
def wrapper(self, args, repository, key, manifest, **kwargs):
archive = Archive(repository, key, manifest, args.location.archive,
numeric_owner=getattr(args, 'numeric_owner', False),
nobsdflags=getattr(args, 'nobsdflags', False),
cache=kwargs.get('cache'),
consider_part_files=args.consider_part_files, log_json=args.log_json)
return method(self, args, repository=repository, manifest=manifest, key=key, archive=archive, **kwargs)
return wrapper
def parse_storage_quota(storage_quota):
parsed = parse_file_size(storage_quota)
if parsed < parse_file_size('10M'):
raise argparse.ArgumentTypeError('quota is too small (%s). At least 10M are required.' % storage_quota)
return parsed
def get_func(args):
# This works around http://bugs.python.org/issue9351
# func is used at the leaf parsers of the argparse parser tree,
# fallback_func at next level towards the root,
# fallback2_func at the 2nd next level (which is root in our case).
for name in 'func', 'fallback_func', 'fallback2_func':
func = getattr(args, name, None)
if func is not None:
return func
raise Exception('expected func attributes not found')
class Archiver:
def __init__(self, lock_wait=None, prog=None):
self.exit_code = EXIT_SUCCESS
self.lock_wait = lock_wait
self.prog = prog
def print_error(self, msg, *args):
msg = args and msg % args or msg
self.exit_code = EXIT_ERROR
logger.error(msg)
def print_warning(self, msg, *args):
msg = args and msg % args or msg
self.exit_code = EXIT_WARNING # we do not terminate here, so it is a warning
logger.warning(msg)
def print_file_status(self, status, path):
if self.output_list and (self.output_filter is None or status in self.output_filter):
if self.log_json:
print(json.dumps({
'type': 'file_status',
'status': status,
'path': remove_surrogates(path),
}), file=sys.stderr)
else:
logging.getLogger('borg.output.list').info("%1s %s", status, remove_surrogates(path))
@staticmethod
def build_matcher(inclexcl_patterns, include_paths):
matcher = PatternMatcher()
matcher.add_inclexcl(inclexcl_patterns)
matcher.add_includepaths(include_paths)
return matcher
def do_serve(self, args):
"""Start in server mode. This command is usually not used manually."""
RepositoryServer(
restrict_to_paths=args.restrict_to_paths,
restrict_to_repositories=args.restrict_to_repositories,
append_only=args.append_only,
storage_quota=args.storage_quota,
).serve()
return EXIT_SUCCESS
@with_repository(create=True, exclusive=True, manifest=False)
def do_init(self, args, repository):
"""Initialize an empty repository"""
path = args.location.canonical_path()
logger.info('Initializing repository at "%s"' % path)
try:
key = key_creator(repository, args)
except (EOFError, KeyboardInterrupt):
repository.destroy()
return EXIT_WARNING
manifest = Manifest(key, repository)
manifest.key = key
manifest.write()
repository.commit(compact=False)
with Cache(repository, key, manifest, warn_if_unencrypted=False):
pass
if key.tam_required:
tam_file = tam_required_file(repository)
open(tam_file, 'w').close()
logger.warning(
'\n'
'By default repositories initialized with this version will produce security\n'
'errors if written to with an older version (up to and including Borg 1.0.8).\n'
'\n'
'If you want to use these older versions, you can disable the check by running:\n'
'borg upgrade --disable-tam %s\n'
'\n'
'See https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability '
'for details about the security implications.', shlex.quote(path))
if key.NAME != 'plaintext':
if 'repokey' in key.NAME:
logger.warning(
'\n'
'IMPORTANT: you will need both KEY AND PASSPHRASE to access this repo!\n'
'The key is included in the repository config, but should be backed up in case the repository gets corrupted.\n'
'Use "borg key export" to export the key, optionally in printable format.\n'
'Write down the passphrase. Store both at safe place(s).\n')
else:
logger.warning(
'\n'
'IMPORTANT: you will need both KEY AND PASSPHRASE to access this repo!\n'
'Use "borg key export" to export the key, optionally in printable format.\n'
'Write down the passphrase. Store both at safe place(s).\n')
return self.exit_code
@with_repository(exclusive=True, manifest=False)
def do_check(self, args, repository):
"""Check repository consistency"""
if args.repair:
msg = ("'check --repair' is an experimental feature that might result in data loss." +
"\n" +
"Type 'YES' if you understand this and want to continue: ")
if not yes(msg, false_msg="Aborting.", invalid_msg="Invalid answer, aborting.",
truish=('YES', ), retry=False,
env_var_override='BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'):
return EXIT_ERROR
if args.repo_only and any(
(args.verify_data, args.first, args.last, args.prefix is not None, args.glob_archives)):
self.print_error("--repository-only contradicts --first, --last, --prefix and --verify-data arguments.")
return EXIT_ERROR
if args.repair and args.max_duration:
self.print_error("--repair does not allow --max-duration argument.")
return EXIT_ERROR
if args.max_duration and not args.repo_only:
# when doing a partial repo check, we can only check crc32 checksums in segment files,
# we can't build a fresh repo index in memory to verify the on-disk index against it.
# thus, we should not do an archives check based on a unknown-quality on-disk repo index.
# also, there is no max_duration support in the archives check code anyway.
self.print_error("--repository-only is required for --max-duration support.")
return EXIT_ERROR
if not args.archives_only:
if not repository.check(repair=args.repair, save_space=args.save_space, max_duration=args.max_duration):
return EXIT_WARNING
if args.prefix is not None:
args.glob_archives = args.prefix + '*'
if not args.repo_only and not ArchiveChecker().check(
repository, repair=args.repair, archive=args.location.archive,
first=args.first, last=args.last, sort_by=args.sort_by or 'ts', glob=args.glob_archives,
verify_data=args.verify_data, save_space=args.save_space):
return EXIT_WARNING
return EXIT_SUCCESS
@with_repository(compatibility=(Manifest.Operation.CHECK,))
def do_change_passphrase(self, args, repository, manifest, key):
"""Change repository key file passphrase"""
if not hasattr(key, 'change_passphrase'):
print('This repository is not encrypted, cannot change the passphrase.')
return EXIT_ERROR
key.change_passphrase()
logger.info('Key updated')
if hasattr(key, 'find_key'):
# print key location to make backing it up easier
logger.info('Key location: %s', key.find_key())
return EXIT_SUCCESS
@with_repository(lock=False, exclusive=False, manifest=False, cache=False)
def do_key_export(self, args, repository):
"""Export the repository key for backup"""
manager = KeyManager(repository)
manager.load_keyblob()
if args.paper:
manager.export_paperkey(args.path)
else:
if not args.path:
self.print_error("output file to export key to expected")
return EXIT_ERROR
try:
if args.qr:
manager.export_qr(args.path)
else:
manager.export(args.path)
except IsADirectoryError:
self.print_error("'{}' must be a file, not a directory".format(args.path))
return EXIT_ERROR
return EXIT_SUCCESS
@with_repository(lock=False, exclusive=False, manifest=False, cache=False)
def do_key_import(self, args, repository):
"""Import the repository key from backup"""
manager = KeyManager(repository)
if args.paper:
if args.path:
self.print_error("with --paper import from file is not supported")
return EXIT_ERROR
manager.import_paperkey(args)
else:
if not args.path:
self.print_error("input file to import key from expected")
return EXIT_ERROR
if args.path != '-' and not os.path.exists(args.path):
self.print_error("input file does not exist: " + args.path)
return EXIT_ERROR
manager.import_keyfile(args)
return EXIT_SUCCESS
@with_repository(manifest=False)
def do_migrate_to_repokey(self, args, repository):
"""Migrate passphrase -> repokey"""
manifest_data = repository.get(Manifest.MANIFEST_ID)
key_old = PassphraseKey.detect(repository, manifest_data)
key_new = RepoKey(repository)
key_new.target = repository
key_new.repository_id = repository.id
key_new.enc_key = key_old.enc_key
key_new.enc_hmac_key = key_old.enc_hmac_key
key_new.id_key = key_old.id_key
key_new.chunk_seed = key_old.chunk_seed
key_new.change_passphrase() # option to change key protection passphrase, save
logger.info('Key updated')
return EXIT_SUCCESS
def do_benchmark_crud(self, args):
"""Benchmark Create, Read, Update, Delete for archives."""
def measurement_run(repo, path):
archive = repo + '::borg-benchmark-crud'
compression = '--compression=none'
# measure create perf (without files cache to always have it chunking)
t_start = time.monotonic()
rc = self.do_create(self.parse_args(['create', compression, '--files-cache=disabled', archive + '1', path]))
t_end = time.monotonic()
dt_create = t_end - t_start
assert rc == 0
# now build files cache
rc1 = self.do_create(self.parse_args(['create', compression, archive + '2', path]))
rc2 = self.do_delete(self.parse_args(['delete', archive + '2']))
assert rc1 == rc2 == 0
# measure a no-change update (archive1 is still present)
t_start = time.monotonic()
rc1 = self.do_create(self.parse_args(['create', compression, archive + '3', path]))
t_end = time.monotonic()
dt_update = t_end - t_start
rc2 = self.do_delete(self.parse_args(['delete', archive + '3']))
assert rc1 == rc2 == 0
# measure extraction (dry-run: without writing result to disk)
t_start = time.monotonic()
rc = self.do_extract(self.parse_args(['extract', '--dry-run', archive + '1']))
t_end = time.monotonic()
dt_extract = t_end - t_start
assert rc == 0
# measure archive deletion (of LAST present archive with the data)
t_start = time.monotonic()
rc = self.do_delete(self.parse_args(['delete', archive + '1']))
t_end = time.monotonic()
dt_delete = t_end - t_start
assert rc == 0
return dt_create, dt_update, dt_extract, dt_delete
@contextmanager
def test_files(path, count, size, random):
path = os.path.join(path, 'borg-test-data')
os.makedirs(path)
for i in range(count):
fname = os.path.join(path, 'file_%d' % i)
data = b'\0' * size if not random else os.urandom(size)
with SyncFile(fname, binary=True) as fd: # used for posix_fadvise's sake
fd.write(data)
yield path
shutil.rmtree(path)
if '_BORG_BENCHMARK_CRUD_TEST' in os.environ:
tests = [
('Z-TEST', 1, 1, False),
('R-TEST', 1, 1, True),
]
else:
tests = [
('Z-BIG', 10, 100000000, False),
('R-BIG', 10, 100000000, True),
('Z-MEDIUM', 1000, 1000000, False),
('R-MEDIUM', 1000, 1000000, True),
('Z-SMALL', 10000, 10000, False),
('R-SMALL', 10000, 10000, True),
]
for msg, count, size, random in tests:
with test_files(args.path, count, size, random) as path:
dt_create, dt_update, dt_extract, dt_delete = measurement_run(args.location.canonical_path(), path)
total_size_MB = count * size / 1e06
file_size_formatted = format_file_size(size)
content = 'random' if random else 'all-zero'
fmt = '%s-%-10s %9.2f MB/s (%d * %s %s files: %.2fs)'
print(fmt % ('C', msg, total_size_MB / dt_create, count, file_size_formatted, content, dt_create))
print(fmt % ('R', msg, total_size_MB / dt_extract, count, file_size_formatted, content, dt_extract))
print(fmt % ('U', msg, total_size_MB / dt_update, count, file_size_formatted, content, dt_update))
print(fmt % ('D', msg, total_size_MB / dt_delete, count, file_size_formatted, content, dt_delete))
return 0
@with_repository(fake='dry_run', exclusive=True, compatibility=(Manifest.Operation.WRITE,))
def do_create(self, args, repository, manifest=None, key=None):
"""Create new archive"""
matcher = PatternMatcher(fallback=True)
matcher.add_inclexcl(args.patterns)
def create_inner(archive, cache, fso):
# Add cache dir to inode_skip list
skip_inodes = set()
try:
st = os.stat(get_cache_dir())
skip_inodes.add((st.st_ino, st.st_dev))
except OSError:
pass
# Add local repository dir to inode_skip list
if not args.location.host:
try:
st = os.stat(args.location.path)
skip_inodes.add((st.st_ino, st.st_dev))
except OSError:
pass
logger.debug('Processing files ...')
for path in args.paths:
if path == '-': # stdin
path = args.stdin_name
if not dry_run:
try:
status = fso.process_stdin(path=path, cache=cache)
except BackupOSError as e:
status = 'E'
self.print_warning('%s: %s', path, e)
else:
status = '-'
self.print_file_status(status, path)
continue
path = os.path.normpath(path)
parent_dir = os.path.dirname(path) or '.'
name = os.path.basename(path)
# note: for path == '/': name == '' and parent_dir == '/'.
# the empty name will trigger a fall-back to path-based processing in os_stat and os_open.
with OsOpen(path=parent_dir, flags=flags_root, noatime=True, op='open_root') as parent_fd:
try:
st = os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False)
except OSError as e:
self.print_warning('%s: %s', path, e)
continue
if args.one_file_system:
restrict_dev = st.st_dev
else:
restrict_dev = None
self._process(path=path, parent_fd=parent_fd, name=name,
fso=fso, cache=cache, matcher=matcher,
exclude_caches=args.exclude_caches, exclude_if_present=args.exclude_if_present,
keep_exclude_tags=args.keep_exclude_tags, skip_inodes=skip_inodes,
restrict_dev=restrict_dev, read_special=args.read_special, dry_run=dry_run)
if not dry_run:
if args.progress:
archive.stats.show_progress(final=True)
archive.stats += fso.stats
if sig_int:
# do not save the archive if the user ctrl-c-ed - it is valid, but incomplete.
# we already have a checkpoint archive in this case.
self.print_error("Got Ctrl-C / SIGINT.")
else:
archive.save(comment=args.comment, timestamp=args.timestamp, stats=archive.stats)
args.stats |= args.json
if args.stats:
if args.json:
json_print(basic_json_data(manifest, cache=cache, extra={
'archive': archive,
}))
else:
log_multi(DASHES,
str(archive),
DASHES,
STATS_HEADER,
str(archive.stats),
str(cache),
DASHES, logger=logging.getLogger('borg.output.stats'))
self.output_filter = args.output_filter
self.output_list = args.output_list
self.nobsdflags = args.nobsdflags
self.exclude_nodump = args.exclude_nodump
dry_run = args.dry_run
t0 = datetime.utcnow()
t0_monotonic = time.monotonic()
logger.info('Creating archive at "%s"' % args.location.orig)
if not dry_run:
with Cache(repository, key, manifest, progress=args.progress,
lock_wait=self.lock_wait, permit_adhoc_cache=args.no_cache_sync,
cache_mode=args.files_cache_mode) as cache:
archive = Archive(repository, key, manifest, args.location.archive, cache=cache,
create=True, checkpoint_interval=args.checkpoint_interval,
numeric_owner=args.numeric_owner, noatime=not args.atime, noctime=args.noctime,
progress=args.progress,
chunker_params=args.chunker_params, start=t0, start_monotonic=t0_monotonic,
log_json=args.log_json)
metadata_collector = MetadataCollector(noatime=not args.atime, noctime=args.noctime,
nobsdflags=args.nobsdflags, numeric_owner=args.numeric_owner, nobirthtime=args.nobirthtime)
cp = ChunksProcessor(cache=cache, key=key,
add_item=archive.add_item, write_checkpoint=archive.write_checkpoint,
checkpoint_interval=args.checkpoint_interval, rechunkify=False)
fso = FilesystemObjectProcessors(metadata_collector=metadata_collector, cache=cache, key=key,
process_file_chunks=cp.process_file_chunks, add_item=archive.add_item,
chunker_params=args.chunker_params, show_progress=args.progress)
create_inner(archive, cache, fso)
else:
create_inner(None, None, None)
return self.exit_code
def _process(self, *, path, parent_fd=None, name=None,
fso, cache, matcher,
exclude_caches, exclude_if_present, keep_exclude_tags, skip_inodes,
restrict_dev, read_special=False, dry_run=False):
"""
Process *path* (or, preferably, parent_fd/name) recursively according to the various parameters.
This should only raise on critical errors. Per-item errors must be handled within this method.
"""
if sig_int and sig_int.action_done():
# the user says "get out of here!" and we have already completed the desired action.
return
try:
recurse_excluded_dir = False
if matcher.match(path):
with backup_io('stat'):
st = os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False)
else:
self.print_file_status('x', path)
# get out here as quickly as possible:
# we only need to continue if we shall recurse into an excluded directory.
# if we shall not recurse, then do not even touch (stat()) the item, it
# could trigger an error, e.g. if access is forbidden, see #3209.
if not matcher.recurse_dir:
return
with backup_io('stat'):
st = os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False)
recurse_excluded_dir = stat.S_ISDIR(st.st_mode)
if not recurse_excluded_dir:
return
if (st.st_ino, st.st_dev) in skip_inodes:
return
# if restrict_dev is given, we do not want to recurse into a new filesystem,
# but we WILL save the mountpoint directory (or more precise: the root
# directory of the mounted filesystem that shadows the mountpoint dir).
recurse = restrict_dev is None or st.st_dev == restrict_dev
status = None
if self.exclude_nodump:
# Ignore if nodump flag is set
with backup_io('flags'):
if get_flags(path=path, st=st) & stat.UF_NODUMP:
self.print_file_status('x', path)
return
if stat.S_ISREG(st.st_mode):
if not dry_run:
status = fso.process_file(path=path, parent_fd=parent_fd, name=name, st=st, cache=cache)
elif stat.S_ISDIR(st.st_mode):
with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_dir,
noatime=True, op='dir_open') as child_fd:
# child_fd is None for directories on windows, in that case a race condition check is not possible.
if child_fd is not None:
with backup_io('fstat'):
st = stat_update_check(st, os.fstat(child_fd))
if recurse:
tag_names = dir_is_tagged(path, exclude_caches, exclude_if_present)
if tag_names:
# if we are already recursing in an excluded dir, we do not need to do anything else than
# returning (we do not need to archive or recurse into tagged directories), see #3991:
if not recurse_excluded_dir:
if keep_exclude_tags and not dry_run:
fso.process_dir(path=path, fd=child_fd, st=st)
for tag_name in tag_names:
tag_path = os.path.join(path, tag_name)
self._process(path=tag_path, parent_fd=child_fd, name=tag_name,
fso=fso, cache=cache, matcher=matcher,
exclude_caches=exclude_caches, exclude_if_present=exclude_if_present,
keep_exclude_tags=keep_exclude_tags, skip_inodes=skip_inodes,
restrict_dev=restrict_dev, read_special=read_special, dry_run=dry_run)
self.print_file_status('x', path)
return
if not dry_run:
if not recurse_excluded_dir:
status = fso.process_dir(path=path, fd=child_fd, st=st)
if recurse:
with backup_io('scandir'):
entries = helpers.scandir_inorder(path=path, fd=child_fd)
for dirent in entries:
normpath = os.path.normpath(os.path.join(path, dirent.name))
self._process(path=normpath, parent_fd=child_fd, name=dirent.name,
fso=fso, cache=cache, matcher=matcher,
exclude_caches=exclude_caches, exclude_if_present=exclude_if_present,
keep_exclude_tags=keep_exclude_tags, skip_inodes=skip_inodes,
restrict_dev=restrict_dev, read_special=read_special, dry_run=dry_run)
elif stat.S_ISLNK(st.st_mode):
if not dry_run:
if not read_special:
status = fso.process_symlink(path=path, parent_fd=parent_fd, name=name, st=st)
else:
try:
st_target = os.stat(name, dir_fd=parent_fd, follow_symlinks=True)
except OSError:
special = False
else:
special = is_special(st_target.st_mode)
if special:
status = fso.process_file(path=path, parent_fd=parent_fd, name=name, st=st_target,
cache=cache, flags=flags_special_follow)
else:
status = fso.process_symlink(path=path, parent_fd=parent_fd, name=name, st=st)
elif stat.S_ISFIFO(st.st_mode):
if not dry_run:
if not read_special:
status = fso.process_fifo(path=path, parent_fd=parent_fd, name=name, st=st)
else:
status = fso.process_file(path=path, parent_fd=parent_fd, name=name, st=st,
cache=cache, flags=flags_special)
elif stat.S_ISCHR(st.st_mode):
if not dry_run:
if not read_special:
status = fso.process_dev(path=path, parent_fd=parent_fd, name=name, st=st, dev_type='c')
else:
status = fso.process_file(path=path, parent_fd=parent_fd, name=name, st=st,
cache=cache, flags=flags_special)
elif stat.S_ISBLK(st.st_mode):
if not dry_run:
if not read_special:
status = fso.process_dev(path=path, parent_fd=parent_fd, name=name, st=st, dev_type='b')
else:
status = fso.process_file(path=path, parent_fd=parent_fd, name=name, st=st,
cache=cache, flags=flags_special)
elif stat.S_ISSOCK(st.st_mode):
# Ignore unix sockets
return
elif stat.S_ISDOOR(st.st_mode):
# Ignore Solaris doors
return
elif stat.S_ISPORT(st.st_mode):
# Ignore Solaris event ports
return
else:
self.print_warning('Unknown file type: %s', path)
return
except (BackupOSError, BackupError) as e:
self.print_warning('%s: %s', path, e)
status = 'E'
if status == 'C':
self.print_warning('%s: file changed while we backed it up', path)
# Status output
if status is None:
if not dry_run:
status = '?' # need to add a status code somewhere
else:
status = '-' # dry run, item was not backed up
if not recurse_excluded_dir:
self.print_file_status(status, path)
@staticmethod
def build_filter(matcher, peek_and_store_hardlink_masters, strip_components):
if strip_components:
def item_filter(item):
matched = matcher.match(item.path) and os.sep.join(item.path.split(os.sep)[strip_components:])
peek_and_store_hardlink_masters(item, matched)
return matched
else:
def item_filter(item):
matched = matcher.match(item.path)
peek_and_store_hardlink_masters(item, matched)
return matched
return item_filter
@with_repository(compatibility=(Manifest.Operation.READ,))
@with_archive
def do_extract(self, args, repository, manifest, key, archive):
"""Extract archive contents"""
# be restrictive when restoring files, restore permissions later
if sys.getfilesystemencoding() == 'ascii':
logger.warning('Warning: File system encoding is "ascii", extracting non-ascii filenames will not be supported.')
if sys.platform.startswith(('linux', 'freebsd', 'netbsd', 'openbsd', 'darwin', )):
logger.warning('Hint: You likely need to fix your locale setup. E.g. install locales and use: LANG=en_US.UTF-8')
matcher = self.build_matcher(args.patterns, args.paths)
progress = args.progress
output_list = args.output_list
dry_run = args.dry_run
stdout = args.stdout
sparse = args.sparse
strip_components = args.strip_components
dirs = []
partial_extract = not matcher.empty() or strip_components
hardlink_masters = {} if partial_extract else None
def peek_and_store_hardlink_masters(item, matched):
if (partial_extract and not matched and hardlinkable(item.mode) and
item.get('hardlink_master', True) and 'source' not in item):
hardlink_masters[item.get('path')] = (item.get('chunks'), None)
filter = self.build_filter(matcher, peek_and_store_hardlink_masters, strip_components)
if progress:
pi = ProgressIndicatorPercent(msg='%5.1f%% Extracting: %s', step=0.1, msgid='extract')
pi.output('Calculating total archive size for the progress indicator (might take long for large archives)')
extracted_size = sum(item.get_size(hardlink_masters) for item in archive.iter_items(filter))
pi.total = extracted_size
else:
pi = None
for item in archive.iter_items(filter, partial_extract=partial_extract,
preload=True, hardlink_masters=hardlink_masters):
orig_path = item.path
if strip_components:
item.path = os.sep.join(orig_path.split(os.sep)[strip_components:])
if not args.dry_run:
while dirs and not item.path.startswith(dirs[-1].path):
dir_item = dirs.pop(-1)
try:
archive.extract_item(dir_item, stdout=stdout)
except BackupOSError as e:
self.print_warning('%s: %s', remove_surrogates(dir_item.path), e)
if output_list:
logging.getLogger('borg.output.list').info(remove_surrogates(item.path))
try:
if dry_run:
archive.extract_item(item, dry_run=True, pi=pi)
else:
if stat.S_ISDIR(item.mode):
dirs.append(item)
archive.extract_item(item, stdout=stdout, restore_attrs=False)
else:
archive.extract_item(item, stdout=stdout, sparse=sparse, hardlink_masters=hardlink_masters,
stripped_components=strip_components, original_path=orig_path, pi=pi)
except (BackupOSError, BackupError) as e:
self.print_warning('%s: %s', remove_surrogates(orig_path), e)
if pi:
pi.finish()
if not args.dry_run:
pi = ProgressIndicatorPercent(total=len(dirs), msg='Setting directory permissions %3.0f%%',
msgid='extract.permissions')
while dirs:
pi.show()
dir_item = dirs.pop(-1)
try:
archive.extract_item(dir_item, stdout=stdout)
except BackupOSError as e:
self.print_warning('%s: %s', remove_surrogates(dir_item.path), e)
for pattern in matcher.get_unmatched_include_patterns():
self.print_warning("Include pattern '%s' never matched.", pattern)
if pi:
# clear progress output
pi.finish()
return self.exit_code
@with_repository(compatibility=(Manifest.Operation.READ,))
@with_archive
def do_export_tar(self, args, repository, manifest, key, archive):
"""Export archive contents as a tarball"""
self.output_list = args.output_list
# A quick note about the general design of tar_filter and tarfile;
# The tarfile module of Python can provide some compression mechanisms
# by itself, using the builtin gzip, bz2 and lzma modules (and "tarmodes"
# such as "w:xz").
#
# Doing so would have three major drawbacks:
# For one the compressor runs on the same thread as the program using the
# tarfile, stealing valuable CPU time from Borg and thus reducing throughput.
# Then this limits the available options - what about lz4? Brotli? zstd?
# The third issue is that systems can ship more optimized versions than those
# built into Python, e.g. pigz or pxz, which can use more than one thread for
# compression.
#
# Therefore we externalize compression by using a filter program, which has
# none of these drawbacks. The only issue of using an external filter is
# that it has to be installed -- hardly a problem, considering that
# the decompressor must be installed as well to make use of the exported tarball!
filter = None
if args.tar_filter == 'auto':
# Note that filter remains None if tarfile is '-'.
if args.tarfile.endswith('.tar.gz'):
filter = 'gzip'
elif args.tarfile.endswith('.tar.bz2'):
filter = 'bzip2'
elif args.tarfile.endswith('.tar.xz'):
filter = 'xz'
logger.debug('Automatically determined tar filter: %s', filter)
else:
filter = args.tar_filter
tarstream = dash_open(args.tarfile, 'wb')
tarstream_close = args.tarfile != '-'
if filter:
# When we put a filter between us and the final destination,
# the selected output (tarstream until now) becomes the output of the filter (=filterout).
# The decision whether to close that or not remains the same.
filterout = tarstream
filterout_close = tarstream_close
env = prepare_subprocess_env(system=True)
# There is no deadlock potential here (the subprocess docs warn about this), because
# communication with the process is a one-way road, i.e. the process can never block
# for us to do something while we block on the process for something different.
filterproc = popen_with_error_handling(filter, stdin=subprocess.PIPE, stdout=filterout,
log_prefix='--tar-filter: ', env=env)
if not filterproc:
return EXIT_ERROR
# Always close the pipe, otherwise the filter process would not notice when we are done.
tarstream = filterproc.stdin
tarstream_close = True
# The | (pipe) symbol instructs tarfile to use a streaming mode of operation
# where it never seeks on the passed fileobj.
tar = tarfile.open(fileobj=tarstream, mode='w|')
self._export_tar(args, archive, tar)
# This does not close the fileobj (tarstream) we passed to it -- a side effect of the | mode.
tar.close()
if tarstream_close:
tarstream.close()
if filter:
logger.debug('Done creating tar, waiting for filter to die...')
rc = filterproc.wait()
if rc:
logger.error('--tar-filter exited with code %d, output file is likely unusable!', rc)
self.exit_code = EXIT_ERROR
else:
logger.debug('filter exited with code %d', rc)
if filterout_close:
filterout.close()
return self.exit_code
def _export_tar(self, args, archive, tar):
matcher = self.build_matcher(args.patterns, args.paths)
progress = args.progress
output_list = args.output_list
strip_components = args.strip_components
partial_extract = not matcher.empty() or strip_components
hardlink_masters = {} if partial_extract else None
def peek_and_store_hardlink_masters(item, matched):
if (partial_extract and not matched and hardlinkable(item.mode) and
item.get('hardlink_master', True) and 'source' not in item):
hardlink_masters[item.get('path')] = (item.get('chunks'), None)
filter = self.build_filter(matcher, peek_and_store_hardlink_masters, strip_components)
if progress:
pi = ProgressIndicatorPercent(msg='%5.1f%% Processing: %s', step=0.1, msgid='extract')
pi.output('Calculating size')
extracted_size = sum(item.get_size(hardlink_masters) for item in archive.iter_items(filter))
pi.total = extracted_size
else:
pi = None
def item_content_stream(item):
"""
Return a file-like object that reads from the chunks of *item*.
"""
chunk_iterator = archive.pipeline.fetch_many([chunk_id for chunk_id, _, _ in item.chunks])
if pi:
info = [remove_surrogates(item.path)]
return ChunkIteratorFileWrapper(chunk_iterator,
lambda read_bytes: pi.show(increase=len(read_bytes), info=info))
else:
return ChunkIteratorFileWrapper(chunk_iterator)
def item_to_tarinfo(item, original_path):
"""
Transform a Borg *item* into a tarfile.TarInfo object.
Return a tuple (tarinfo, stream), where stream may be a file-like object that represents
the file contents, if any, and is None otherwise. When *tarinfo* is None, the *item*
cannot be represented as a TarInfo object and should be skipped.
"""
# If we would use the PAX (POSIX) format (which we currently don't),
# we can support most things that aren't possible with classic tar
# formats, including GNU tar, such as:
# atime, ctime, possibly Linux capabilities (security.* xattrs)
# and various additions supported by GNU tar in POSIX mode.
stream = None
tarinfo = tarfile.TarInfo()
tarinfo.name = item.path
tarinfo.mtime = item.mtime / 1e9
tarinfo.mode = stat.S_IMODE(item.mode)
tarinfo.uid = item.uid
tarinfo.gid = item.gid
tarinfo.uname = item.user or ''
tarinfo.gname = item.group or ''
# The linkname in tar has the same dual use the 'source' attribute of Borg items,
# i.e. for symlinks it means the destination, while for hardlinks it refers to the
# file.
# Since hardlinks in tar have a different type code (LNKTYPE) the format might
# support hardlinking arbitrary objects (including symlinks and directories), but
# whether implementations actually support that is a whole different question...
tarinfo.linkname = ""
modebits = stat.S_IFMT(item.mode)
if modebits == stat.S_IFREG:
tarinfo.type = tarfile.REGTYPE
if 'source' in item:
source = os.sep.join(item.source.split(os.sep)[strip_components:])
if hardlink_masters is None:
linkname = source
else:
chunks, linkname = hardlink_masters.get(item.source, (None, source))
if linkname:
# Master was already added to the archive, add a hardlink reference to it.
tarinfo.type = tarfile.LNKTYPE
tarinfo.linkname = linkname
elif chunks is not None:
# The item which has the chunks was not put into the tar, therefore
# we do that now and update hardlink_masters to reflect that.
item.chunks = chunks
tarinfo.size = item.get_size()
stream = item_content_stream(item)
hardlink_masters[item.get('source') or original_path] = (None, item.path)
else:
tarinfo.size = item.get_size()
stream = item_content_stream(item)
elif modebits == stat.S_IFDIR:
tarinfo.type = tarfile.DIRTYPE
elif modebits == stat.S_IFLNK:
tarinfo.type = tarfile.SYMTYPE
tarinfo.linkname = item.source
elif modebits == stat.S_IFBLK:
tarinfo.type = tarfile.BLKTYPE
tarinfo.devmajor = os.major(item.rdev)
tarinfo.devminor = os.minor(item.rdev)
elif modebits == stat.S_IFCHR:
tarinfo.type = tarfile.CHRTYPE
tarinfo.devmajor = os.major(item.rdev)
tarinfo.devminor = os.minor(item.rdev)
elif modebits == stat.S_IFIFO:
tarinfo.type = tarfile.FIFOTYPE
else:
self.print_warning('%s: unsupported file type %o for tar export', remove_surrogates(item.path), modebits)
set_ec(EXIT_WARNING)
return None, stream
return tarinfo, stream
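        # Walk the archive, rewrite paths for --strip-components and stream each exportable item into the tar.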
for item in archive.iter_items(filter, preload=True, hardlink_masters=hardlink_masters):
orig_path = item.path
if strip_components:
item.path = os.sep.join(orig_path.split(os.sep)[strip_components:])
tarinfo, stream = item_to_tarinfo(item, orig_path)
if tarinfo:
if output_list:
logging.getLogger('borg.output.list').info(remove_surrogates(orig_path))
tar.addfile(tarinfo, stream)
if pi:
pi.finish()
for pattern in matcher.get_unmatched_include_patterns():
self.print_warning("Include pattern '%s' never matched.", pattern)
return self.exit_code
@with_repository(compatibility=(Manifest.Operation.READ,))
@with_archive
def do_diff(self, args, repository, manifest, key, archive):
"""Diff contents of two archives"""
def print_output(diff, path):
print("{:<19} {}".format(diff, path))
archive1 = archive
archive2 = Archive(repository, key, manifest, args.archive2,
consider_part_files=args.consider_part_files)
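        # Chunk IDs are only comparable if both archives used the same chunker params; the deliberately
        # different fallbacks (False vs. True) make missing metadata compare as unequal.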
can_compare_chunk_ids = archive1.metadata.get('chunker_params', False) == archive2.metadata.get(
'chunker_params', True) or args.same_chunker_params
if not can_compare_chunk_ids:
self.print_warning('--chunker-params might be different between archives, diff will be slow.\n'
'If you know for certain that they are the same, pass --same-chunker-params '
'to override this check.')
matcher = self.build_matcher(args.patterns, args.paths)
diffs = Archive.compare_archives_iter(archive1, archive2, matcher, can_compare_chunk_ids=can_compare_chunk_ids)
# Conversion to string and filtering for diff.equal to save memory if sorting
diffs = ((path, str(diff)) for path, diff in diffs if not diff.equal)
if args.sort:
diffs = sorted(diffs)
for path, diff in diffs:
print_output(diff, path)
for pattern in matcher.get_unmatched_include_patterns():
self.print_warning("Include pattern '%s' never matched.", pattern)
return self.exit_code
@with_repository(exclusive=True, cache=True, compatibility=(Manifest.Operation.CHECK,))
@with_archive
def do_rename(self, args, repository, manifest, key, cache, archive):
"""Rename an existing archive"""
archive.rename(args.name)
manifest.write()
repository.commit(compact=False)
cache.commit()
return self.exit_code
@with_repository(exclusive=True, manifest=False)
def do_delete(self, args, repository):
"""Delete an existing repository or archives"""
archive_filter_specified = any((args.first, args.last, args.prefix is not None, args.glob_archives))
explicit_archives_specified = args.location.archive or args.archives
if archive_filter_specified and explicit_archives_specified:
self.print_error('Mixing archive filters and explicitly named archives is not supported.')
return self.exit_code
if archive_filter_specified or explicit_archives_specified:
return self._delete_archives(args, repository)
else:
return self._delete_repository(args, repository)
def _delete_archives(self, args, repository):
"""Delete archives"""
dry_run = args.dry_run
manifest, key = Manifest.load(repository, (Manifest.Operation.DELETE,))
if args.location.archive or args.archives:
archives = list(args.archives)
if args.location.archive:
archives.insert(0, args.location.archive)
archive_names = tuple(archives)
else:
archive_names = tuple(x.name for x in manifest.archives.list_considering(args))
if not archive_names:
return self.exit_code
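        # Double --force: operate on the manifest only, so archives can be deleted even from a damaged
        # repository; 'borg check --repair' has to clean up the orphaned data afterwards.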
if args.forced == 2:
deleted = False
for i, archive_name in enumerate(archive_names, 1):
try:
current_archive = manifest.archives.pop(archive_name)
except KeyError:
self.exit_code = EXIT_WARNING
logger.warning('Archive {} not found ({}/{}).'.format(archive_name, i, len(archive_names)))
else:
deleted = True
msg = 'Would delete: {} ({}/{})' if dry_run else 'Deleted archive: {} ({}/{})'
logger.info(msg.format(format_archive(current_archive), i, len(archive_names)))
if dry_run:
logger.info('Finished dry-run.')
elif deleted:
manifest.write()
# note: might crash in compact() after committing the repo
repository.commit(compact=False)
logger.warning('Done. Run "borg check --repair" to clean up the mess.')
else:
logger.warning('Aborted.')
return self.exit_code
stats = Statistics()
with Cache(repository, key, manifest, progress=args.progress, lock_wait=self.lock_wait) as cache:
msg_delete = 'Would delete archive: {} ({}/{})' if dry_run else 'Deleting archive: {} ({}/{})'
msg_not_found = 'Archive {} not found ({}/{}).'
for i, archive_name in enumerate(archive_names, 1):
try:
archive_info = manifest.archives[archive_name]
except KeyError:
logger.warning(msg_not_found.format(archive_name, i, len(archive_names)))
else:
logger.info(msg_delete.format(format_archive(archive_info), i, len(archive_names)))
if not dry_run:
archive = Archive(repository, key, manifest, archive_name, cache=cache,
consider_part_files=args.consider_part_files)
archive.delete(stats, progress=args.progress, forced=args.forced)
if not dry_run:
manifest.write()
repository.commit(compact=False, save_space=args.save_space)
cache.commit()
if args.stats:
log_multi(DASHES,
STATS_HEADER,
stats.summary.format(label='Deleted data:', stats=stats),
str(cache),
DASHES, logger=logging.getLogger('borg.output.stats'))
return self.exit_code
def _delete_repository(self, args, repository):
"""Delete a repository"""
dry_run = args.dry_run
if not args.cache_only:
msg = []
try:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
except NoManifestError:
msg.append("You requested to completely DELETE the repository *including* all archives it may "
"contain.")
msg.append("This repository seems to have no manifest, so we can't tell anything about its "
"contents.")
else:
msg.append("You requested to completely DELETE the repository *including* all archives it "
"contains:")
for archive_info in manifest.archives.list(sort_by=['ts']):
msg.append(format_archive(archive_info))
msg.append("Type 'YES' if you understand this and want to continue: ")
msg = '\n'.join(msg)
if not yes(msg, false_msg="Aborting.", invalid_msg='Invalid answer, aborting.', truish=('YES',),
retry=False, env_var_override='BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'):
self.exit_code = EXIT_ERROR
return self.exit_code
if not dry_run:
repository.destroy()
logger.info("Repository deleted.")
SecurityManager.destroy(repository)
else:
logger.info("Would delete repository.")
if not dry_run:
Cache.destroy(repository)
logger.info("Cache deleted.")
else:
logger.info("Would delete cache.")
return self.exit_code
def do_mount(self, args):
"""Mount archive or an entire repository as a FUSE filesystem"""
# Perform these checks before opening the repository and asking for a passphrase.
try:
import borg.fuse
except ImportError as e:
self.print_error('borg mount not available: loading FUSE support failed [ImportError: %s]' % str(e))
return self.exit_code
if not os.path.isdir(args.mountpoint) or not os.access(args.mountpoint, os.R_OK | os.W_OK | os.X_OK):
self.print_error('%s: Mountpoint must be a writable directory' % args.mountpoint)
return self.exit_code
return self._do_mount(args)
@with_repository(compatibility=(Manifest.Operation.READ,))
def _do_mount(self, args, repository, manifest, key):
from .fuse import FuseOperations
with cache_if_remote(repository, decrypted_cache=key) as cached_repo:
operations = FuseOperations(key, repository, manifest, args, cached_repo)
logger.info("Mounting filesystem")
try:
operations.mount(args.mountpoint, args.options, args.foreground)
except RuntimeError:
# Relevant error message already printed to stderr by FUSE
self.exit_code = EXIT_ERROR
return self.exit_code
def do_umount(self, args):
"""un-mount the FUSE filesystem"""
return umount(args.mountpoint)
@with_repository(compatibility=(Manifest.Operation.READ,))
def do_list(self, args, repository, manifest, key):
"""List archive or repository contents"""
if args.location.archive:
if args.json:
self.print_error('The --json option is only valid for listing archives, not archive contents.')
return self.exit_code
return self._list_archive(args, repository, manifest, key)
else:
if args.json_lines:
self.print_error('The --json-lines option is only valid for listing archive contents, not archives.')
return self.exit_code
return self._list_repository(args, repository, manifest, key)
def _list_archive(self, args, repository, manifest, key):
matcher = self.build_matcher(args.patterns, args.paths)
if args.format is not None:
format = args.format
elif args.short:
format = "{path}{NL}"
else:
format = "{mode} {user:6} {group:6} {size:8} {mtime} {path}{extra}{NL}"
def _list_inner(cache):
archive = Archive(repository, key, manifest, args.location.archive, cache=cache,
consider_part_files=args.consider_part_files)
formatter = ItemFormatter(archive, format, json_lines=args.json_lines)
for item in archive.iter_items(lambda item: matcher.match(item.path)):
sys.stdout.write(formatter.format_item(item))
# Only load the cache if it will be used
if ItemFormatter.format_needs_cache(format):
with Cache(repository, key, manifest, lock_wait=self.lock_wait) as cache:
_list_inner(cache)
else:
_list_inner(cache=None)
return self.exit_code
def _list_repository(self, args, repository, manifest, key):
if args.format is not None:
format = args.format
elif args.short:
format = "{archive}{NL}"
else:
format = "{archive:<36} {time} [{id}]{NL}"
formatter = ArchiveFormatter(format, repository, manifest, key, json=args.json)
output_data = []
for archive_info in manifest.archives.list_considering(args):
if args.json:
output_data.append(formatter.get_item_data(archive_info))
else:
sys.stdout.write(formatter.format_item(archive_info))
if args.json:
json_print(basic_json_data(manifest, extra={
'archives': output_data
}))
return self.exit_code
@with_repository(cache=True, compatibility=(Manifest.Operation.READ,))
def do_info(self, args, repository, manifest, key, cache):
"""Show archive details such as disk space used"""
if any((args.location.archive, args.first, args.last, args.prefix is not None, args.glob_archives)):
return self._info_archives(args, repository, manifest, key, cache)
else:
return self._info_repository(args, repository, manifest, key, cache)
def _info_archives(self, args, repository, manifest, key, cache):
def format_cmdline(cmdline):
return remove_surrogates(' '.join(shlex.quote(x) for x in cmdline))
if args.location.archive:
archive_names = (args.location.archive,)
else:
archive_names = tuple(x.name for x in manifest.archives.list_considering(args))
if not archive_names:
return self.exit_code
output_data = []
for i, archive_name in enumerate(archive_names, 1):
archive = Archive(repository, key, manifest, archive_name, cache=cache,
consider_part_files=args.consider_part_files)
info = archive.info()
if args.json:
output_data.append(info)
else:
info['duration'] = format_timedelta(timedelta(seconds=info['duration']))
info['command_line'] = format_cmdline(info['command_line'])
print(textwrap.dedent("""
Archive name: {name}
Archive fingerprint: {id}
Comment: {comment}
Hostname: {hostname}
Username: {username}
Time (start): {start}
Time (end): {end}
Duration: {duration}
Number of files: {stats[nfiles]}
Command line: {command_line}
Utilization of maximum supported archive size: {limits[max_archive_size]:.0%}
------------------------------------------------------------------------------
Original size Compressed size Deduplicated size
This archive: {stats[original_size]:>20s} {stats[compressed_size]:>20s} {stats[deduplicated_size]:>20s}
{cache}
""").strip().format(cache=cache, **info))
if self.exit_code:
break
if not args.json and len(archive_names) - i:
print()
if args.json:
json_print(basic_json_data(manifest, cache=cache, extra={
'archives': output_data,
}))
return self.exit_code
def _info_repository(self, args, repository, manifest, key, cache):
info = basic_json_data(manifest, cache=cache, extra={
'security_dir': cache.security_manager.dir,
})
if args.json:
json_print(info)
else:
encryption = 'Encrypted: '
if key.NAME == 'plaintext':
encryption += 'No'
else:
encryption += 'Yes (%s)' % key.NAME
if key.NAME.startswith('key file'):
encryption += '\nKey file: %s' % key.find_key()
info['encryption'] = encryption
print(textwrap.dedent("""
Repository ID: {id}
Location: {location}
{encryption}
Cache: {cache.path}
Security dir: {security_dir}
""").strip().format(
id=bin_to_hex(repository.id),
location=repository._location.canonical_path(),
**info))
print(DASHES)
print(STATS_HEADER)
print(str(cache))
return self.exit_code
@with_repository(exclusive=True, compatibility=(Manifest.Operation.DELETE,))
def do_prune(self, args, repository, manifest, key):
"""Prune repository archives according to specified rules"""
if not any((args.secondly, args.minutely, args.hourly, args.daily,
args.weekly, args.monthly, args.yearly, args.within)):
self.print_error('At least one of the "keep-within", "keep-last", '
'"keep-secondly", "keep-minutely", "keep-hourly", "keep-daily", '
'"keep-weekly", "keep-monthly" or "keep-yearly" settings must be specified.')
return self.exit_code
if args.prefix is not None:
args.glob_archives = args.prefix + '*'
checkpoint_re = r'\.checkpoint(\.\d+)?'
archives_checkpoints = manifest.archives.list(glob=args.glob_archives,
match_end=r'(%s)?\Z' % checkpoint_re,
sort_by=['ts'], reverse=True)
is_checkpoint = re.compile(r'(%s)\Z' % checkpoint_re).search
checkpoints = [arch for arch in archives_checkpoints if is_checkpoint(arch.name)]
# keep the latest checkpoint, if there is no later non-checkpoint archive
if archives_checkpoints and checkpoints and archives_checkpoints[0] is checkpoints[0]:
keep_checkpoints = checkpoints[:1]
else:
keep_checkpoints = []
checkpoints = set(checkpoints)
# ignore all checkpoint archives to avoid keeping one (which is an incomplete backup)
# that is newer than a successfully completed backup - and killing the successful backup.
archives = [arch for arch in archives_checkpoints if arch not in checkpoints]
keep = []
# collect the rule responsible for the keeping of each archive in this dict
# keys are archive ids, values are a tuple
# (<rulename>, <how many archives were kept by this rule so far >)
kept_because = {}
# find archives which need to be kept because of the keep-within rule
if args.within:
keep += prune_within(archives, args.within, kept_because)
# find archives which need to be kept because of the various time period rules
for rule in PRUNING_PATTERNS.keys():
num = getattr(args, rule, None)
if num is not None:
keep += prune_split(archives, rule, num, kept_because)
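        # everything that is neither kept by a rule nor the retained latest checkpoint gets pruned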
to_delete = (set(archives) | checkpoints) - (set(keep) | set(keep_checkpoints))
stats = Statistics()
with Cache(repository, key, manifest, lock_wait=self.lock_wait) as cache:
list_logger = logging.getLogger('borg.output.list')
# set up counters for the progress display
to_delete_len = len(to_delete)
archives_deleted = 0
pi = ProgressIndicatorPercent(total=len(to_delete), msg='Pruning archives %3.0f%%', msgid='prune')
for archive in archives_checkpoints:
if archive in to_delete:
pi.show()
if args.dry_run:
log_message = 'Would prune:'
else:
archives_deleted += 1
log_message = 'Pruning archive (%d/%d):' % (archives_deleted, to_delete_len)
archive = Archive(repository, key, manifest, archive.name, cache,
consider_part_files=args.consider_part_files)
archive.delete(stats, forced=args.forced)
else:
if is_checkpoint(archive.name):
log_message = 'Keeping checkpoint archive:'
else:
log_message = 'Keeping archive (rule: {rule} #{num}):'.format(
rule=kept_because[archive.id][0], num=kept_because[archive.id][1]
)
if args.output_list:
list_logger.info("{message:<40} {archive}".format(
message=log_message, archive=format_archive(archive)
))
pi.finish()
if to_delete and not args.dry_run:
manifest.write()
repository.commit(compact=False, save_space=args.save_space)
cache.commit()
if args.stats:
log_multi(DASHES,
STATS_HEADER,
stats.summary.format(label='Deleted data:', stats=stats),
str(cache),
DASHES, logger=logging.getLogger('borg.output.stats'))
return self.exit_code
@with_repository(fake=('tam', 'disable_tam'), invert_fake=True, manifest=False, exclusive=True)
def do_upgrade(self, args, repository, manifest=None, key=None):
"""upgrade a repository from a previous version"""
if args.tam:
manifest, key = Manifest.load(repository, (Manifest.Operation.CHECK,), force_tam_not_required=args.force)
if not hasattr(key, 'change_passphrase'):
print('This repository is not encrypted, cannot enable TAM.')
return EXIT_ERROR
if not manifest.tam_verified or not manifest.config.get(b'tam_required', False):
# The standard archive listing doesn't include the archive ID like in borg 1.1.x
print('Manifest contents:')
for archive_info in manifest.archives.list(sort_by=['ts']):
print(format_archive(archive_info), '[%s]' % bin_to_hex(archive_info.id))
manifest.config[b'tam_required'] = True
manifest.write()
repository.commit(compact=False)
if not key.tam_required:
key.tam_required = True
key.change_passphrase(key._passphrase)
print('Key updated')
if hasattr(key, 'find_key'):
print('Key location:', key.find_key())
if not tam_required(repository):
tam_file = tam_required_file(repository)
open(tam_file, 'w').close()
print('Updated security database')
elif args.disable_tam:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK, force_tam_not_required=True)
if tam_required(repository):
os.unlink(tam_required_file(repository))
if key.tam_required:
key.tam_required = False
key.change_passphrase(key._passphrase)
print('Key updated')
if hasattr(key, 'find_key'):
print('Key location:', key.find_key())
manifest.config[b'tam_required'] = False
manifest.write()
repository.commit(compact=False)
else:
# mainly for upgrades from Attic repositories,
# but also supports borg 0.xx -> 1.0 upgrade.
repo = AtticRepositoryUpgrader(args.location.path, create=False)
try:
repo.upgrade(args.dry_run, inplace=args.inplace, progress=args.progress)
except NotImplementedError as e:
print("warning: %s" % e)
repo = BorgRepositoryUpgrader(args.location.path, create=False)
try:
repo.upgrade(args.dry_run, inplace=args.inplace, progress=args.progress)
except NotImplementedError as e:
print("warning: %s" % e)
return self.exit_code
@with_repository(cache=True, exclusive=True, compatibility=(Manifest.Operation.CHECK,))
def do_recreate(self, args, repository, manifest, key, cache):
"""Re-create archives"""
msg = ("recreate is an experimental feature.\n"
"Type 'YES' if you understand this and want to continue: ")
if not yes(msg, false_msg="Aborting.", truish=('YES',),
env_var_override='BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING'):
return EXIT_ERROR
matcher = self.build_matcher(args.patterns, args.paths)
self.output_list = args.output_list
self.output_filter = args.output_filter
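        # --recompress: 'never' leaves stored chunks untouched, 'if-different' recompresses chunks whose
        # compression differs from the target spec, 'always' recompresses everything.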
recompress = args.recompress != 'never'
always_recompress = args.recompress == 'always'
recreater = ArchiveRecreater(repository, manifest, key, cache, matcher,
exclude_caches=args.exclude_caches, exclude_if_present=args.exclude_if_present,
keep_exclude_tags=args.keep_exclude_tags, chunker_params=args.chunker_params,
compression=args.compression, recompress=recompress, always_recompress=always_recompress,
progress=args.progress, stats=args.stats,
file_status_printer=self.print_file_status,
checkpoint_interval=args.checkpoint_interval,
dry_run=args.dry_run, timestamp=args.timestamp)
if args.location.archive:
name = args.location.archive
if recreater.is_temporary_archive(name):
self.print_error('Refusing to work on temporary archive of prior recreate: %s', name)
return self.exit_code
if not recreater.recreate(name, args.comment, args.target):
self.print_error('Nothing to do. Archive was not processed.\n'
'Specify at least one pattern, PATH, --comment, re-compression or re-chunking option.')
else:
if args.target is not None:
self.print_error('--target: Need to specify single archive')
return self.exit_code
for archive in manifest.archives.list(sort_by=['ts']):
name = archive.name
if recreater.is_temporary_archive(name):
continue
print('Processing', name)
if not recreater.recreate(name, args.comment):
logger.info('Skipped archive %s: Nothing to do. Archive was not processed.', name)
if not args.dry_run:
manifest.write()
repository.commit(compact=False)
cache.commit()
return self.exit_code
@with_repository(manifest=False, exclusive=True)
def do_with_lock(self, args, repository):
"""run a user specified command with the repository lock held"""
# for a new server, this will immediately take an exclusive lock.
# to support old servers, that do not have "exclusive" arg in open()
# RPC API, we also do it the old way:
# re-write manifest to start a repository transaction - this causes a
# lock upgrade to exclusive for remote (and also for local) repositories.
# by using manifest=False in the decorator, we avoid having to require
# the encryption key (and can operate just with encrypted data).
data = repository.get(Manifest.MANIFEST_ID)
repository.put(Manifest.MANIFEST_ID, data)
# usually, a 0 byte (open for writing) segment file would be visible in the filesystem here.
# we write and close this file, to rather have a valid segment file on disk, before invoking the subprocess.
# we can only do this for local repositories (with .io), though:
if hasattr(repository, 'io'):
repository.io.close_segment()
env = prepare_subprocess_env(system=True)
try:
# we exit with the return code we get from the subprocess
return subprocess.call([args.command] + args.args, env=env)
finally:
# we need to commit the "no change" operation we did to the manifest
# because it created a new segment file in the repository. if we would
# roll back, the same file would be later used otherwise (for other content).
# that would be bad if somebody uses rsync with ignore-existing (or
# any other mechanism relying on existing segment data not changing).
# see issue #1867.
repository.commit(compact=False)
@with_repository(manifest=False, exclusive=True)
def do_compact(self, args, repository):
"""compact segment files in the repository"""
# see the comment in do_with_lock about why we do it like this:
data = repository.get(Manifest.MANIFEST_ID)
repository.put(Manifest.MANIFEST_ID, data)
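        # the command line takes the threshold in percent, the repository API expects a fraction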
threshold = args.threshold / 100
repository.commit(compact=True, threshold=threshold, cleanup_commits=args.cleanup_commits)
return EXIT_SUCCESS
@with_repository(exclusive=True, manifest=False)
def do_config(self, args, repository):
"""get, set, and delete values in a repository or cache config file"""
def repo_validate(section, name, value=None, check_value=True):
if section not in ['repository', ]:
raise ValueError('Invalid section')
if name in ['segments_per_dir', 'max_segment_size', 'storage_quota', ]:
if check_value:
try:
int(value)
except ValueError:
raise ValueError('Invalid value') from None
if name == 'max_segment_size':
if int(value) >= MAX_SEGMENT_SIZE_LIMIT:
raise ValueError('Invalid value: max_segment_size >= %d' % MAX_SEGMENT_SIZE_LIMIT)
elif name in ['additional_free_space', ]:
if check_value:
try:
parse_file_size(value)
except ValueError:
raise ValueError('Invalid value') from None
elif name in ['append_only', ]:
if check_value and value not in ['0', '1']:
raise ValueError('Invalid value')
elif name in ['id', ]:
if check_value:
try:
bin_id = unhexlify(value)
                    except ValueError:
raise ValueError('Invalid value, must be 64 hex digits') from None
if len(bin_id) != 32:
raise ValueError('Invalid value, must be 64 hex digits')
else:
raise ValueError('Invalid name')
def cache_validate(section, name, value=None, check_value=True):
if section not in ['cache', ]:
raise ValueError('Invalid section')
if name in ['previous_location', ]:
if check_value:
Location(value)
else:
raise ValueError('Invalid name')
def list_config(config):
default_values = {
'version': '1',
'segments_per_dir': str(DEFAULT_SEGMENTS_PER_DIR),
'max_segment_size': str(MAX_SEGMENT_SIZE_LIMIT),
'additional_free_space': '0',
'storage_quota': repository.storage_quota,
'append_only': repository.append_only
}
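            # print the effective values; keys missing from the config file fall back to their defaults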
print('[repository]')
for key in ['version', 'segments_per_dir', 'max_segment_size',
'storage_quota', 'additional_free_space', 'append_only',
'id']:
                value = config.get('repository', key, fallback=None)  # None triggers the default lookup below
if value is None:
value = default_values.get(key)
if value is None:
raise Error('The repository config is missing the %s key which has no default value' % key)
print('%s = %s' % (key, value))
if not args.list:
if args.name is None:
self.print_error('No config key name was provided.')
return self.exit_code
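            # config keys are addressed as 'section.name'; a bare name defaults to the [cache] or
            # [repository] section, depending on --cache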
try:
section, name = args.name.split('.')
except ValueError:
section = args.cache and "cache" or "repository"
name = args.name
if args.cache:
manifest, key = Manifest.load(repository, (Manifest.Operation.WRITE,))
assert_secure(repository, manifest, self.lock_wait)
cache = Cache(repository, key, manifest, lock_wait=self.lock_wait)
try:
if args.cache:
cache.cache_config.load()
config = cache.cache_config._config
save = cache.cache_config.save
validate = cache_validate
else:
config = repository.config
save = lambda: repository.save_config(repository.path, repository.config) # noqa
validate = repo_validate
if args.delete:
validate(section, name, check_value=False)
config.remove_option(section, name)
if len(config.options(section)) == 0:
config.remove_section(section)
save()
elif args.list:
list_config(config)
elif args.value:
validate(section, name, args.value)
if section not in config.sections():
config.add_section(section)
config.set(section, name, args.value)
save()
else:
try:
print(config.get(section, name))
except (configparser.NoOptionError, configparser.NoSectionError) as e:
print(e, file=sys.stderr)
return EXIT_WARNING
return EXIT_SUCCESS
finally:
if args.cache:
cache.close()
def do_debug_info(self, args):
"""display system information for debugging / bug reports"""
print(sysinfo())
# Additional debug information
print('CRC implementation:', crc32.__name__)
print('Process ID:', get_process_id())
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_dump_archive_items(self, args, repository, manifest, key):
"""dump (decrypted, decompressed) archive items metadata (not: data)"""
archive = Archive(repository, key, manifest, args.location.archive,
consider_part_files=args.consider_part_files)
for i, item_id in enumerate(archive.metadata.items):
data = key.decrypt(item_id, repository.get(item_id))
filename = '%06d_%s.items' % (i, bin_to_hex(item_id))
print('Dumping', filename)
with open(filename, 'wb') as fd:
fd.write(data)
print('Done.')
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_dump_archive(self, args, repository, manifest, key):
"""dump decoded archive metadata (not: data)"""
try:
archive_meta_orig = manifest.archives.get_raw_dict()[safe_encode(args.location.archive)]
except KeyError:
raise Archive.DoesNotExist(args.location.archive)
indent = 4
def do_indent(d):
return textwrap.indent(json.dumps(d, indent=indent), prefix=' ' * indent)
def output(fd):
            # this can output megabytes of data for a modest-sized archive, so we stream the JSON manually
fd.write('{\n')
fd.write(' "_name": ' + json.dumps(args.location.archive) + ",\n")
fd.write(' "_manifest_entry":\n')
fd.write(do_indent(prepare_dump_dict(archive_meta_orig)))
fd.write(',\n')
data = key.decrypt(archive_meta_orig[b'id'], repository.get(archive_meta_orig[b'id']))
archive_org_dict = msgpack.unpackb(data, object_hook=StableDict)
fd.write(' "_meta":\n')
fd.write(do_indent(prepare_dump_dict(archive_org_dict)))
fd.write(',\n')
fd.write(' "_items": [\n')
unpacker = msgpack.Unpacker(use_list=False, object_hook=StableDict)
first = True
for item_id in archive_org_dict[b'items']:
data = key.decrypt(item_id, repository.get(item_id))
unpacker.feed(data)
for item in unpacker:
item = prepare_dump_dict(item)
if first:
first = False
else:
fd.write(',\n')
fd.write(do_indent(item))
fd.write('\n')
fd.write(' ]\n}\n')
with dash_open(args.path, 'w') as fd:
output(fd)
return EXIT_SUCCESS
@with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_dump_manifest(self, args, repository, manifest, key):
"""dump decoded repository manifest"""
data = key.decrypt(None, repository.get(manifest.MANIFEST_ID))
meta = prepare_dump_dict(msgpack.unpackb(data, object_hook=StableDict))
with dash_open(args.path, 'w') as fd:
json.dump(meta, fd, indent=4)
return EXIT_SUCCESS
@with_repository(manifest=False)
def do_debug_dump_repo_objs(self, args, repository):
"""dump (decrypted, decompressed) repo objects, repo index MUST be current/correct"""
from .crypto.key import key_factory
def decrypt_dump(i, id, cdata, tag=None, segment=None, offset=None):
if cdata is not None:
give_id = id if id != Manifest.MANIFEST_ID else None
data = key.decrypt(give_id, cdata)
else:
data = b''
tag_str = '' if tag is None else '_' + tag
segment_str = '_' + str(segment) if segment is not None else ''
offset_str = '_' + str(offset) if offset is not None else ''
id_str = '_' + bin_to_hex(id) if id is not None else ''
filename = '%08d%s%s%s%s.obj' % (i, segment_str, offset_str, tag_str, id_str)
print('Dumping', filename)
with open(filename, 'wb') as fd:
fd.write(data)
if args.ghost:
            # dump ghosty stuff from segment files: not yet committed objects, deleted / superseded objects, commit tags
# set up the key without depending on a manifest obj
for id, cdata, tag, segment, offset in repository.scan_low_level():
if tag == TAG_PUT:
key = key_factory(repository, cdata)
break
i = 0
for id, cdata, tag, segment, offset in repository.scan_low_level():
if tag == TAG_PUT:
decrypt_dump(i, id, cdata, tag='put', segment=segment, offset=offset)
elif tag == TAG_DELETE:
decrypt_dump(i, id, None, tag='del', segment=segment, offset=offset)
elif tag == TAG_COMMIT:
decrypt_dump(i, None, None, tag='commit', segment=segment, offset=offset)
i += 1
else:
# set up the key without depending on a manifest obj
ids = repository.list(limit=1, marker=None)
cdata = repository.get(ids[0])
key = key_factory(repository, cdata)
marker = None
i = 0
while True:
result = repository.scan(limit=LIST_SCAN_LIMIT, marker=marker) # must use on-disk order scanning here
if not result:
break
marker = result[-1]
for id in result:
cdata = repository.get(id)
decrypt_dump(i, id, cdata)
i += 1
print('Done.')
return EXIT_SUCCESS
@with_repository(manifest=False)
def do_debug_search_repo_objs(self, args, repository):
"""search for byte sequences in repo objects, repo index MUST be current/correct"""
context = 32
def print_finding(info, wanted, data, offset):
before = data[offset - context:offset]
after = data[offset + len(wanted):offset + len(wanted) + context]
print('%s: %s %s %s == %r %r %r' % (info, before.hex(), wanted.hex(), after.hex(),
before, wanted, after))
wanted = args.wanted
try:
if wanted.startswith('hex:'):
wanted = unhexlify(wanted[4:])
elif wanted.startswith('str:'):
wanted = wanted[4:].encode()
else:
raise ValueError('unsupported search term')
except (ValueError, UnicodeEncodeError):
wanted = None
if not wanted:
self.print_error('search term needs to be hex:123abc or str:foobar style')
return EXIT_ERROR
from .crypto.key import key_factory
# set up the key without depending on a manifest obj
ids = repository.list(limit=1, marker=None)
cdata = repository.get(ids[0])
key = key_factory(repository, cdata)
marker = None
last_data = b''
last_id = None
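        # last_data/last_id remember the previous object so that matches spanning the boundary
        # between two adjacent objects are found as well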
i = 0
while True:
result = repository.scan(limit=LIST_SCAN_LIMIT, marker=marker) # must use on-disk order scanning here
if not result:
break
marker = result[-1]
for id in result:
cdata = repository.get(id)
give_id = id if id != Manifest.MANIFEST_ID else None
data = key.decrypt(give_id, cdata)
# try to locate wanted sequence crossing the border of last_data and data
boundary_data = last_data[-(len(wanted) - 1):] + data[:len(wanted) - 1]
if wanted in boundary_data:
boundary_data = last_data[-(len(wanted) - 1 + context):] + data[:len(wanted) - 1 + context]
offset = boundary_data.find(wanted)
info = '%d %s | %s' % (i, last_id.hex(), id.hex())
print_finding(info, wanted, boundary_data, offset)
# try to locate wanted sequence in data
count = data.count(wanted)
if count:
                    offset = data.find(wanted)  # only determine first occurrence's offset
info = "%d %s #%d" % (i, id.hex(), count)
print_finding(info, wanted, data, offset)
last_id, last_data = id, data
i += 1
if i % 10000 == 0:
print('%d objects processed.' % i)
print('Done.')
return EXIT_SUCCESS
@with_repository(manifest=False)
def do_debug_get_obj(self, args, repository):
"""get object contents from the repository and write it into file"""
hex_id = args.id
try:
id = unhexlify(hex_id)
except ValueError:
print("object id %s is invalid." % hex_id)
else:
try:
data = repository.get(id)
except Repository.ObjectNotFound:
print("object %s not found." % hex_id)
else:
with open(args.path, "wb") as f:
f.write(data)
print("object %s fetched." % hex_id)
return EXIT_SUCCESS
@with_repository(manifest=False, exclusive=True)
def do_debug_put_obj(self, args, repository):
"""put file(s) contents into the repository"""
for path in args.paths:
with open(path, "rb") as f:
data = f.read()
h = hashlib.sha256(data) # XXX hardcoded
repository.put(h.digest(), data)
print("object %s put." % h.hexdigest())
repository.commit(compact=False)
return EXIT_SUCCESS
@with_repository(manifest=False, exclusive=True)
def do_debug_delete_obj(self, args, repository):
"""delete the objects with the given IDs from the repo"""
modified = False
for hex_id in args.ids:
try:
id = unhexlify(hex_id)
except ValueError:
print("object id %s is invalid." % hex_id)
else:
try:
repository.delete(id)
modified = True
print("object %s deleted." % hex_id)
except Repository.ObjectNotFound:
print("object %s not found." % hex_id)
if modified:
repository.commit(compact=False)
print('Done.')
return EXIT_SUCCESS
@with_repository(manifest=False, exclusive=True, cache=True, compatibility=Manifest.NO_OPERATION_CHECK)
def do_debug_refcount_obj(self, args, repository, manifest, key, cache):
"""display refcounts for the objects with the given IDs"""
for hex_id in args.ids:
try:
id = unhexlify(hex_id)
except ValueError:
print("object id %s is invalid." % hex_id)
else:
try:
refcount = cache.chunks[id][0]
print("object %s has %d referrers [info from chunks cache]." % (hex_id, refcount))
except KeyError:
print("object %s not found [info from chunks cache]." % hex_id)
return EXIT_SUCCESS
def do_debug_convert_profile(self, args):
"""convert Borg profile to Python profile"""
import marshal
with args.output, args.input:
marshal.dump(msgpack.mp_unpack(args.input, use_list=False, raw=False), args.output)
return EXIT_SUCCESS
@with_repository(lock=False, manifest=False)
def do_break_lock(self, args, repository):
"""Break the repository lock (e.g. in case it was left by a dead borg."""
repository.break_lock()
Cache.break_lock(repository)
return self.exit_code
helptext = collections.OrderedDict()
helptext['patterns'] = textwrap.dedent('''
The path/filenames used as input for the pattern matching start from the
currently active recursion root. You usually give the recursion root(s)
when invoking borg and these can be either relative or absolute paths.
So, when you give `relative/` as root, the paths going into the matcher
will look like `relative/.../file.ext`. When you give `/absolute/` as root,
        they will look like `/absolute/.../file.ext`. This is what we mean when we
        talk about the "full path" below.
File patterns support these styles: fnmatch, shell, regular expressions,
path prefixes and path full-matches. By default, fnmatch is used for
``--exclude`` patterns and shell-style is used for the experimental ``--pattern``
option.
If followed by a colon (':') the first two characters of a pattern are used as a
style selector. Explicit style selection is necessary when a
non-default style is desired or when the desired pattern starts with
two alphanumeric characters followed by a colon (i.e. `aa:something/*`).
`Fnmatch <https://docs.python.org/3/library/fnmatch.html>`_, selector `fm:`
This is the default style for ``--exclude`` and ``--exclude-from``.
These patterns use a variant of shell pattern syntax, with '\\*' matching
any number of characters, '?' matching any single character, '[...]'
matching any single character specified, including ranges, and '[!...]'
matching any character not specified. For the purpose of these patterns,
the path separator (backslash for Windows and '/' on other systems) is not
treated specially. Wrap meta-characters in brackets for a literal
match (i.e. `[?]` to match the literal character `?`). For a path
to match a pattern, the full path must match, or it must match
from the start of the full path to just before a path separator. Except
for the root path, paths will never end in the path separator when
matching is attempted. Thus, if a given pattern ends in a path
separator, a '\\*' is appended before matching is attempted.
Shell-style patterns, selector `sh:`
This is the default style for ``--pattern`` and ``--patterns-from``.
Like fnmatch patterns these are similar to shell patterns. The difference
is that the pattern may include `**/` for matching zero or more directory
levels, `*` for matching zero or more arbitrary characters with the
exception of any path separator.
Regular expressions, selector `re:`
Regular expressions similar to those found in Perl are supported. Unlike
shell patterns regular expressions are not required to match the full
path and any substring match is sufficient. It is strongly recommended to
anchor patterns to the start ('^'), to the end ('$') or both. Path
separators (backslash for Windows and '/' on other systems) in paths are
always normalized to a forward slash ('/') before applying a pattern. The
regular expression syntax is described in the `Python documentation for
the re module <https://docs.python.org/3/library/re.html>`_.
Path prefix, selector `pp:`
This pattern style is useful to match whole sub-directories. The pattern
`pp:root/somedir` matches `root/somedir` and everything therein.
Path full-match, selector `pf:`
This pattern style is (only) useful to match full paths.
This is kind of a pseudo pattern as it can not have any variable or
unspecified parts - the full path must be given.
            `pf:root/file.ext` matches `root/file.ext` only.
Implementation note: this is implemented via very time-efficient O(1)
hashtable lookups (this means you can have huge amounts of such patterns
without impacting performance much).
Due to that, this kind of pattern does not respect any context or order.
If you use such a pattern to include a file, it will always be included
(if the directory recursion encounters it).
Other include/exclude patterns that would normally match will be ignored.
Same logic applies for exclude.
.. note::
`re:`, `sh:` and `fm:` patterns are all implemented on top of the Python SRE
engine. It is very easy to formulate patterns for each of these types which
requires an inordinate amount of time to match paths. If untrusted users
are able to supply patterns, ensure they cannot supply `re:` patterns.
Further, ensure that `sh:` and `fm:` patterns only contain a handful of
wildcards at most.
Exclusions can be passed via the command line option ``--exclude``. When used
from within a shell the patterns should be quoted to protect them from
expansion.
The ``--exclude-from`` option permits loading exclusion patterns from a text
file with one pattern per line. Lines empty or starting with the number sign
('#') after removing whitespace on both ends are ignored. The optional style
selector prefix is also supported for patterns loaded from a file. Due to
whitespace removal paths with whitespace at the beginning or end can only be
excluded using regular expressions.
To test your exclusion patterns without performing an actual backup you can
run ``borg create --list --dry-run ...``.
Examples::
# Exclude '/home/user/file.o' but not '/home/user/file.odt':
$ borg create -e '*.o' backup /
# Exclude '/home/user/junk' and '/home/user/subdir/junk' but
# not '/home/user/importantjunk' or '/etc/junk':
$ borg create -e '/home/*/junk' backup /
# Exclude the contents of '/home/user/cache' but not the directory itself:
$ borg create -e /home/user/cache/ backup /
# The file '/home/user/cache/important' is *not* backed up:
$ borg create -e /home/user/cache/ backup / /home/user/cache/important
# The contents of directories in '/home' are not backed up when their name
# ends in '.tmp'
$ borg create --exclude 're:^/home/[^/]+\\.tmp/' backup /
# Load exclusions from file
$ cat >exclude.txt <<EOF
# Comment line
/home/*/junk
*.tmp
fm:aa:something/*
            re:^/home/[^/]+\\.tmp/
sh:/home/*/.thumbnails
EOF
$ borg create --exclude-from exclude.txt backup /
.. container:: experimental
A more general and easier to use way to define filename matching patterns exists
with the experimental ``--pattern`` and ``--patterns-from`` options. Using these, you
may specify the backup roots (starting points) and patterns for inclusion/exclusion.
A root path starts with the prefix `R`, followed by a path (a plain path, not a
file pattern). An include rule starts with the prefix +, an exclude rule starts
with the prefix -, an exclude-norecurse rule starts with !, all followed by a pattern.
.. note::
Via ``--pattern`` or ``--patterns-from`` you can define BOTH inclusion and exclusion
of files using pattern prefixes ``+`` and ``-``. With ``--exclude`` and
            ``--exclude-from`` ONLY excludes are defined.
Inclusion patterns are useful to include paths that are contained in an excluded
path. The first matching pattern is used so if an include pattern matches before
an exclude pattern, the file is backed up. If an exclude-norecurse pattern matches
a directory, it won't recurse into it and won't discover any potential matches for
include rules below that directory.
Note that the default pattern style for ``--pattern`` and ``--patterns-from`` is
            shell style (`sh:`), so those patterns behave similarly to rsync include/exclude
patterns. The pattern style can be set via the `P` prefix.
Patterns (``--pattern``) and excludes (``--exclude``) from the command line are
considered first (in the order of appearance). Then patterns from ``--patterns-from``
are added. Exclusion patterns from ``--exclude-from`` files are appended last.
Examples::
# backup pics, but not the ones from 2018, except the good ones:
# note: using = is essential to avoid cmdline argument parsing issues.
borg create --pattern=+pics/2018/good --pattern=-pics/2018 repo::arch pics
# use a file with patterns:
borg create --patterns-from patterns.lst repo::arch
The patterns.lst file could look like that::
# "sh:" pattern style is the default, so the following line is not needed:
P sh
R /
            # can be rebuilt
- /home/*/.cache
# they're downloads for a reason
- /home/*/Downloads
# susan is a nice person
# include susans home
+ /home/susan
# don't backup the other home directories
- /home/*
# don't even look in /proc
! /proc\n\n''')
helptext['placeholders'] = textwrap.dedent('''
Repository (or Archive) URLs, ``--prefix``, ``--glob-archives``, ``--comment``
and ``--remote-path`` values support these placeholders:
{hostname}
The (short) hostname of the machine.
{fqdn}
The full name of the machine.
{reverse-fqdn}
The full name of the machine in reverse domain name notation.
{now}
The current local date and time, by default in ISO-8601 format.
You can also supply your own `format string <https://docs.python.org/3.5/library/datetime.html#strftime-and-strptime-behavior>`_, e.g. {now:%Y-%m-%d_%H:%M:%S}
{utcnow}
The current UTC date and time, by default in ISO-8601 format.
You can also supply your own `format string <https://docs.python.org/3.5/library/datetime.html#strftime-and-strptime-behavior>`_, e.g. {utcnow:%Y-%m-%d_%H:%M:%S}
{user}
The user name (or UID, if no name is available) of the user running borg.
{pid}
The current process ID.
{borgversion}
The version of borg, e.g.: 1.0.8rc1
{borgmajor}
The version of borg, only the major version, e.g.: 1
{borgminor}
The version of borg, only major and minor version, e.g.: 1.0
{borgpatch}
The version of borg, only major, minor and patch version, e.g.: 1.0.8
If literal curly braces need to be used, double them for escaping::
borg create /path/to/repo::{{literal_text}}
Examples::
borg create /path/to/repo::{hostname}-{user}-{utcnow} ...
borg create /path/to/repo::{hostname}-{now:%Y-%m-%d_%H:%M:%S} ...
borg prune --prefix '{hostname}-' ...
.. note::
systemd uses a difficult, non-standard syntax for command lines in unit files (refer to
the `systemd.unit(5)` manual page).
When invoking borg from unit files, pay particular attention to escaping,
especially when using the now/utcnow placeholders, since systemd performs its own
%-based variable replacement even in quoted text. To avoid interference from systemd,
double all percent signs (``{hostname}-{now:%Y-%m-%d_%H:%M:%S}``
becomes ``{hostname}-{now:%%Y-%%m-%%d_%%H:%%M:%%S}``).\n\n''')
helptext['compression'] = textwrap.dedent('''
        It is no problem to mix different compression methods in one repo;
        deduplication is done on the source data chunks (not on the compressed
or encrypted data).
If some specific chunk was once compressed and stored into the repo, creating
another backup that also uses this chunk will not change the stored chunk.
So if you use different compression specs for the backups, whichever stores a
chunk first determines its compression. See also borg recreate.
Compression is lz4 by default. If you want something else, you have to specify what you want.
Valid compression specifiers are:
none
Do not compress.
lz4
Use lz4 compression. Very high speed, very low compression. (default)
zstd[,L]
Use zstd ("zstandard") compression, a modern wide-range algorithm.
If you do not explicitly give the compression level L (ranging from 1
to 22), it will use level 3.
Archives compressed with zstd are not compatible with borg < 1.1.4.
zlib[,L]
Use zlib ("gz") compression. Medium speed, medium compression.
If you do not explicitly give the compression level L (ranging from 0
to 9), it will use level 6.
            Giving level 0 ("no compression", but still with zlib protocol overhead)
            is usually pointless; use "none" compression instead.
lzma[,L]
Use lzma ("xz") compression. Low speed, high compression.
If you do not explicitly give the compression level L (ranging from 0
to 9), it will use level 6.
            Giving levels above 6 is pointless and counterproductive: due to the
            buffer size used by borg it does not compress any better, but it wastes
            lots of CPU cycles and RAM.
auto,C[,L]
Use a built-in heuristic to decide per chunk whether to compress or not.
            The heuristic uses lz4 to test whether the data is compressible.
For incompressible data, it will not use compression (uses "none").
For compressible data, it uses the given C[,L] compression - with C[,L]
being any valid compression specifier.
Examples::
borg create --compression lz4 REPO::ARCHIVE data
borg create --compression zstd REPO::ARCHIVE data
borg create --compression zstd,10 REPO::ARCHIVE data
borg create --compression zlib REPO::ARCHIVE data
borg create --compression zlib,1 REPO::ARCHIVE data
borg create --compression auto,lzma,6 REPO::ARCHIVE data
borg create --compression auto,lzma ...\n\n''')
def do_help(self, parser, commands, args):
if not args.topic:
parser.print_help()
elif args.topic in self.helptext:
print(rst_to_terminal(self.helptext[args.topic]))
elif args.topic in commands:
if args.epilog_only:
print(commands[args.topic].epilog)
elif args.usage_only:
commands[args.topic].epilog = None
commands[args.topic].print_help()
else:
commands[args.topic].print_help()
else:
msg_lines = []
msg_lines += ['No help available on %s.' % args.topic]
msg_lines += ['Try one of the following:']
msg_lines += [' Commands: %s' % ', '.join(sorted(commands.keys()))]
msg_lines += [' Topics: %s' % ', '.join(sorted(self.helptext.keys()))]
parser.error('\n'.join(msg_lines))
return self.exit_code
def do_subcommand_help(self, parser, args):
"""display infos about subcommand"""
parser.print_help()
return EXIT_SUCCESS
do_maincommand_help = do_subcommand_help
def preprocess_args(self, args):
deprecations = [
# ('--old', '--new' or None, 'Warning: "--old" has been deprecated. Use "--new" instead.'),
('--noatime', None, 'Warning: "--noatime" has been deprecated because it is the default now.')
]
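        # rewrite deprecated option spellings in place (and warn) before argparse sees them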
for i, arg in enumerate(args[:]):
for old_name, new_name, warning in deprecations:
if arg.startswith(old_name):
if new_name is not None:
args[i] = arg.replace(old_name, new_name)
print(warning, file=sys.stderr)
return args
class CommonOptions:
"""
Support class to allow specifying common options directly after the top-level command.
Normally options can only be specified on the parser defining them, which means
that generally speaking *all* options go after all sub-commands. This is annoying
for common options in scripts, e.g. --remote-path or logging options.
This class allows adding the same set of options to both the top-level parser
and the final sub-command parsers (but not intermediary sub-commands, at least for now).
It does so by giving every option's target name ("dest") a suffix indicating its level
-- no two options in the parser hierarchy can have the same target --
then, after parsing the command line, multiple definitions are resolved.
Defaults are handled by only setting them on the top-level parser and setting
a sentinel object in all sub-parsers, which then allows one to discern which parser
supplied the option.
"""
def __init__(self, define_common_options, suffix_precedence):
"""
*define_common_options* should be a callable taking one argument, which
            will be an argparse.Parser.add_argument-like function.
*define_common_options* will be called multiple times, and should call
the passed function to define common options exactly the same way each time.
*suffix_precedence* should be a tuple of the suffixes that will be used.
It is ordered from lowest precedence to highest precedence:
An option specified on the parser belonging to index 0 is overridden if the
same option is specified on any parser with a higher index.
"""
self.define_common_options = define_common_options
self.suffix_precedence = suffix_precedence
# Maps suffixes to sets of target names.
# E.g. common_options["_subcommand"] = {..., "log_level", ...}
self.common_options = dict()
# Set of options with the 'append' action.
self.append_options = set()
# This is the sentinel object that replaces all default values in parsers
# below the top-level parser.
self.default_sentinel = object()
def add_common_group(self, parser, suffix, provide_defaults=False):
"""
Add common options to *parser*.
*provide_defaults* must only be True exactly once in a parser hierarchy,
at the top level, and False on all lower levels. The default is chosen
accordingly.
*suffix* indicates the suffix to use internally. It also indicates
which precedence the *parser* has for common options. See *suffix_precedence*
of __init__.
"""
assert suffix in self.suffix_precedence
def add_argument(*args, **kwargs):
if 'dest' in kwargs:
kwargs.setdefault('action', 'store')
assert kwargs['action'] in ('help', 'store_const', 'store_true', 'store_false', 'store', 'append')
is_append = kwargs['action'] == 'append'
if is_append:
self.append_options.add(kwargs['dest'])
assert kwargs['default'] == [], 'The default is explicitly constructed as an empty list in resolve()'
else:
self.common_options.setdefault(suffix, set()).add(kwargs['dest'])
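                    # append the level suffix so the same option parsed at different levels ends up in
                    # distinct namespace attributes, e.g. 'log_level' -> 'log_level_subcommand'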
kwargs['dest'] += suffix
if not provide_defaults:
# Interpolate help now, in case the %(default)d (or so) is mentioned,
# to avoid producing incorrect help output.
# Assumption: Interpolated output can safely be interpolated again,
# which should always be the case.
# Note: We control all inputs.
kwargs['help'] = kwargs['help'] % kwargs
if not is_append:
kwargs['default'] = self.default_sentinel
common_group.add_argument(*args, **kwargs)
common_group = parser.add_argument_group('Common options')
self.define_common_options(add_argument)
def resolve(self, args: argparse.Namespace): # Namespace has "in" but otherwise is not like a dict.
"""
Resolve the multiple definitions of each common option to the final value.
"""
for suffix in self.suffix_precedence:
# From highest level to lowest level, so the "most-specific" option wins, e.g.
# "borg --debug create --info" shall result in --info being effective.
for dest in self.common_options.get(suffix, []):
# map_from is this suffix' option name, e.g. log_level_subcommand
# map_to is the target name, e.g. log_level
map_from = dest + suffix
map_to = dest
# Retrieve value; depending on the action it may not exist, but usually does
# (store_const/store_true/store_false), either because the action implied a default
# or a default is explicitly supplied.
# Note that defaults on lower levels are replaced with default_sentinel.
# Only the top level has defaults.
value = getattr(args, map_from, self.default_sentinel)
if value is not self.default_sentinel:
# value was indeed specified on this level. Transfer value to target,
# and un-clobber the args (for tidiness - you *cannot* use the suffixed
# names for other purposes, obviously).
setattr(args, map_to, value)
try:
delattr(args, map_from)
except AttributeError:
pass
# Options with an "append" action need some special treatment. Instead of
# overriding values, all specified values are merged together.
for dest in self.append_options:
option_value = []
for suffix in self.suffix_precedence:
# Find values of this suffix, if any, and add them to the final list
extend_from = dest + suffix
if extend_from in args:
values = getattr(args, extend_from)
delattr(args, extend_from)
option_value.extend(values)
setattr(args, dest, option_value)
def build_parser(self):
# You can use :ref:`xyz` in the following usage pages. However, for plain-text view,
# e.g. through "borg ... --help", define a substitution for the reference here.
# It will replace the entire :ref:`foo` verbatim.
rst_plain_text_references = {
            'a_status_oddity': '"I am seeing ‘A’ (added) status for an unchanged file!?"',
'separate_compaction': '"Separate compaction"',
}
def process_epilog(epilog):
epilog = textwrap.dedent(epilog).splitlines()
try:
mode = borg.doc_mode
except AttributeError:
mode = 'command-line'
if mode in ('command-line', 'build_usage'):
epilog = [line for line in epilog if not line.startswith('.. man')]
epilog = '\n'.join(epilog)
if mode == 'command-line':
epilog = rst_to_terminal(epilog, rst_plain_text_references)
return epilog
def define_common_options(add_common_option):
add_common_option('-h', '--help', action='help', help='show this help message and exit')
add_common_option('--critical', dest='log_level',
action='store_const', const='critical', default='warning',
help='work on log level CRITICAL')
add_common_option('--error', dest='log_level',
action='store_const', const='error', default='warning',
help='work on log level ERROR')
add_common_option('--warning', dest='log_level',
action='store_const', const='warning', default='warning',
help='work on log level WARNING (default)')
add_common_option('--info', '-v', '--verbose', dest='log_level',
action='store_const', const='info', default='warning',
help='work on log level INFO')
add_common_option('--debug', dest='log_level',
action='store_const', const='debug', default='warning',
help='enable debug output, work on log level DEBUG')
add_common_option('--debug-topic', metavar='TOPIC', dest='debug_topics', action='append', default=[],
help='enable TOPIC debugging (can be specified multiple times). '
'The logger path is borg.debug.<TOPIC> if TOPIC is not fully qualified.')
add_common_option('-p', '--progress', dest='progress', action='store_true',
help='show progress information')
add_common_option('--log-json', dest='log_json', action='store_true',
help='Output one JSON object per log line instead of formatted text.')
add_common_option('--lock-wait', metavar='SECONDS', dest='lock_wait', type=int, default=1,
help='wait at most SECONDS for acquiring a repository/cache lock (default: %(default)d).')
add_common_option('--show-version', dest='show_version', action='store_true',
help='show/log the borg version')
add_common_option('--show-rc', dest='show_rc', action='store_true',
help='show/log the return code (rc)')
add_common_option('--umask', metavar='M', dest='umask', type=lambda s: int(s, 8), default=UMASK_DEFAULT,
help='set umask to M (local and remote, default: %(default)04o)')
add_common_option('--remote-path', metavar='PATH', dest='remote_path',
help='use PATH as borg executable on the remote (default: "borg")')
add_common_option('--remote-ratelimit', metavar='RATE', dest='remote_ratelimit', type=int,
help='set remote network upload rate limit in kiByte/s (default: 0=unlimited)')
add_common_option('--consider-part-files', dest='consider_part_files', action='store_true',
help='treat part files like normal files (e.g. to list/extract them)')
add_common_option('--debug-profile', metavar='FILE', dest='debug_profile', default=None,
help='Write execution profile in Borg format into FILE. For local use a Python-'
'compatible file can be generated by suffixing FILE with ".pyprof".')
add_common_option('--rsh', metavar='RSH', dest='rsh',
help="Use this command to connect to the 'borg serve' process (default: 'ssh')")
def define_exclude_and_patterns(add_option, *, tag_files=False, strip_components=False):
add_option('-e', '--exclude', metavar='PATTERN', dest='patterns',
type=parse_exclude_pattern, action='append',
help='exclude paths matching PATTERN')
add_option('--exclude-from', metavar='EXCLUDEFILE', action=ArgparseExcludeFileAction,
help='read exclude patterns from EXCLUDEFILE, one per line')
add_option('--pattern', metavar='PATTERN', action=ArgparsePatternAction,
help='experimental: include/exclude paths matching PATTERN')
add_option('--patterns-from', metavar='PATTERNFILE', action=ArgparsePatternFileAction,
help='experimental: read include/exclude patterns from PATTERNFILE, one per line')
if tag_files:
add_option('--exclude-caches', dest='exclude_caches', action='store_true',
help='exclude directories that contain a CACHEDIR.TAG file '
'(http://www.bford.info/cachedir/spec.html)')
add_option('--exclude-if-present', metavar='NAME', dest='exclude_if_present',
action='append', type=str,
help='exclude directories that are tagged by containing a filesystem object with '
'the given NAME')
add_option('--keep-exclude-tags', dest='keep_exclude_tags',
action='store_true',
help='if tag objects are specified with ``--exclude-if-present``, '
'don\'t omit the tag objects themselves from the backup archive')
if strip_components:
add_option('--strip-components', metavar='NUMBER', dest='strip_components', type=int, default=0,
help='Remove the specified number of leading path elements. '
'Paths with fewer elements will be silently skipped.')
def define_exclusion_group(subparser, **kwargs):
exclude_group = subparser.add_argument_group('Exclusion options')
define_exclude_and_patterns(exclude_group.add_argument, **kwargs)
return exclude_group
def define_archive_filters_group(subparser, *, sort_by=True, first_last=True):
filters_group = subparser.add_argument_group('Archive filters',
'Archive filters can be applied to repository targets.')
group = filters_group.add_mutually_exclusive_group()
group.add_argument('-P', '--prefix', metavar='PREFIX', dest='prefix', type=PrefixSpec, default=None,
help='only consider archive names starting with this prefix.')
group.add_argument('-a', '--glob-archives', metavar='GLOB', dest='glob_archives',
type=GlobSpec, default=None,
help='only consider archive names matching the glob. '
'sh: rules apply, see "borg help patterns". '
'``--prefix`` and ``--glob-archives`` are mutually exclusive.')
if sort_by:
sort_by_default = 'timestamp'
filters_group.add_argument('--sort-by', metavar='KEYS', dest='sort_by',
type=SortBySpec, default=sort_by_default,
help='Comma-separated list of sorting keys; valid keys are: {}; default is: {}'
.format(', '.join(AI_HUMAN_SORT_KEYS), sort_by_default))
if first_last:
group = filters_group.add_mutually_exclusive_group()
group.add_argument('--first', metavar='N', dest='first', default=0, type=positive_int_validator,
help='consider first N archives after other filters were applied')
group.add_argument('--last', metavar='N', dest='last', default=0, type=positive_int_validator,
help='consider last N archives after other filters were applied')
def define_borg_mount(parser):
parser.set_defaults(func=self.do_mount)
parser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', type=location_validator(),
help='repository or archive to mount')
parser.add_argument('mountpoint', metavar='MOUNTPOINT', type=str,
help='where to mount filesystem')
parser.add_argument('-f', '--foreground', dest='foreground',
action='store_true',
help='stay in foreground, do not daemonize')
parser.add_argument('-o', dest='options', type=str,
help='Extra mount options')
define_archive_filters_group(parser)
parser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths to extract; patterns are supported')
define_exclusion_group(parser, strip_components=True)
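# Build the actual parser hierarchy: the top-level parser gets the '_maincommand'
# common options, common_parser (reused as parent by leaf subcommands) gets
# '_subcommand', and mid_common_parser (reused by intermediate commands such as
# 'borg key' or 'borg debug') gets '_midcommand'.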
parser = argparse.ArgumentParser(prog=self.prog, description='Borg - Deduplicated Backups',
add_help=False)
# paths and patterns must have an empty list as default everywhere
parser.set_defaults(fallback2_func=functools.partial(self.do_maincommand_help, parser),
paths=[], patterns=[])
parser.common_options = self.CommonOptions(define_common_options,
suffix_precedence=('_maincommand', '_midcommand', '_subcommand'))
parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__,
help='show version number and exit')
parser.common_options.add_common_group(parser, '_maincommand', provide_defaults=True)
common_parser = argparse.ArgumentParser(add_help=False, prog=self.prog)
common_parser.set_defaults(paths=[], patterns=[])
parser.common_options.add_common_group(common_parser, '_subcommand')
mid_common_parser = argparse.ArgumentParser(add_help=False, prog=self.prog)
mid_common_parser.set_defaults(paths=[], patterns=[])
parser.common_options.add_common_group(mid_common_parser, '_midcommand')
# borg mount
mount_epilog = process_epilog("""
This command mounts an archive as a FUSE filesystem. This can be useful for
browsing an archive or restoring individual files. Unless the ``--foreground``
option is given the command will run in the background until the filesystem
is unmounted (e.g. with ``borg umount``).
The command ``borgfs`` provides a wrapper for ``borg mount``. This can also be
used in fstab entries:
``/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0``
To allow a regular user to use fstab entries, add the ``user`` option:
``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0``
For FUSE configuration and mount options, see the mount.fuse(8) manual page.
Additional mount options supported by borg:
- versions: when used with a repository mount, this gives a merged, versioned
view of the files in the archives. EXPERIMENTAL, layout may change in future.
- allow_damaged_files: by default damaged files (where missing chunks were
replaced with runs of zeros by borg check ``--repair``) are not readable and
return EIO (I/O error). Set this option to read such files.
- ignore_permissions: for security reasons the "default_permissions" mount
option is internally enforced by borg. "ignore_permissions" can be given to
not enforce "default_permissions".
The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users
to tweak the performance. It sets the number of cached data chunks; additional
memory usage can be up to ~8 MiB times this number. The default is the number
of CPU cores.
When the daemonized process receives a signal or crashes, it does not unmount.
Unmounting in these cases could cause an active rsync or similar process
to unintentionally delete data.
When running in the foreground ^C/SIGINT unmounts cleanly, but other
signals or crashes do not.
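For example, to mount a single archive, to mount a whole repository with the merged
``versions`` view, and to unmount again (repository path, archive name and mountpoint
below are placeholders, adjust them to your setup)::

    borg mount /path/to/repo::my-archive /mnt/point
    borg mount -o versions /path/to/repo /mnt/point
    borg umount /mnt/point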
""")
if parser.prog == 'borgfs':
parser.description = self.do_mount.__doc__
parser.epilog = mount_epilog
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.help = 'mount repository'
define_borg_mount(parser)
return parser
subparsers = parser.add_subparsers(title='required arguments', metavar='<command>')
# borg benchmark
benchmark_epilog = process_epilog("These commands do various benchmarks.")
subparser = subparsers.add_parser('benchmark', parents=[mid_common_parser], add_help=False,
description='benchmark command',
epilog=benchmark_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='benchmark command')
benchmark_parsers = subparser.add_subparsers(title='required arguments', metavar='<command>')
subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser))
bench_crud_epilog = process_epilog("""
This command benchmarks borg CRUD (create, read, update, delete) operations.
It creates input data below the given PATH and backs up this data into the given REPO.
The REPO must already exist (it can be a fresh empty repo or an existing repo; the
command will create / read / update / delete some archives named borg-benchmark-crud\\* there).
Make sure you have free space there, you'll need about 1GB each (+ overhead).
If your repository is encrypted and borg needs a passphrase to unlock the key, use::
BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH
Measurements are done with different input file sizes and counts.
The file contents are very artificial (either all zero or all random),
thus the measurement results do not necessarily reflect performance with real data.
Also, due to the kind of content used, no compression is used in these benchmarks.
C- == borg create (1st archive creation, no compression, do not use files cache)
C-Z- == all-zero files. full dedup, this is primarily measuring reader/chunker/hasher.
C-R- == random files. no dedup, measuring throughput through all processing stages.
R- == borg extract (extract archive, dry-run, do everything, but do not write files to disk)
R-Z- == all zero files. Measuring heavily duplicated files.
R-R- == random files. No duplication here, measuring throughput through all processing
stages, except writing to disk.
U- == borg create (2nd archive creation of unchanged input files, measure files cache speed)
The throughput value is kind of virtual here, it does not actually read the file.
U-Z- == needs to check the 2 all-zero chunks' existence in the repo.
U-R- == needs to check existence of a lot of different chunks in the repo.
D- == borg delete archive (delete last remaining archive, measure deletion + compaction)
D-Z- == few chunks to delete / few segments to compact/remove.
D-R- == many chunks to delete / many segments to compact/remove.
Please note that there might be quite some variance in these measurements.
Try multiple measurements and use an otherwise idle machine (and network, if you use one).
""")
subparser = benchmark_parsers.add_parser('crud', parents=[common_parser], add_help=False,
description=self.do_benchmark_crud.__doc__,
epilog=bench_crud_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='benchmarks borg CRUD (create, extract, update, delete).')
subparser.set_defaults(func=self.do_benchmark_crud)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to use for benchmark (must exist)')
subparser.add_argument('path', metavar='PATH', help='path where to create benchmark input data')
# borg break-lock
break_lock_epilog = process_epilog("""
This command breaks the repository and cache locks.
Please use carefully and only while no borg process (on any machine) is
trying to access the Cache or the Repository.
""")
subparser = subparsers.add_parser('break-lock', parents=[common_parser], add_help=False,
description=self.do_break_lock.__doc__,
epilog=break_lock_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='break repository and cache locks')
subparser.set_defaults(func=self.do_break_lock)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False),
help='repository for which to break the locks')
# borg check
check_epilog = process_epilog("""
The check command verifies the consistency of a repository and the corresponding archives.
First, the underlying repository data files are checked:
- For all segments the segment magic (header) is checked
- For all objects stored in the segments, all metadata (e.g. crc and size) and
all data is read. The read data is checked by size and CRC. Bit rot and other
types of accidental damage can be detected this way.
- If we are in repair mode and an integrity error is detected for a segment,
we try to recover as many objects from the segment as possible.
- In repair mode, it makes sure that the index is consistent with the data
stored in the segments.
- If you use a remote repo server via ssh:, the repo check is executed on the
repo server without causing significant network traffic.
- The repository check can be skipped using the ``--archives-only`` option.
- A repository check can be time consuming. Partial checks are possible with the ``--max-duration`` option.
Second, the consistency and correctness of the archive metadata is verified:
- Is the repo manifest present? If not, it is rebuilt from archive metadata
chunks (this requires reading and decrypting of all metadata and data).
- Check if archive metadata chunk is present; if not, remove the archive from the
manifest.
- For all files (items) in the archive, for all chunks referenced by these
files, check if chunk is present.
If a chunk is not present and we are in repair mode, replace it with a same-size
replacement chunk of zeros.
If a previously lost chunk reappears (e.g. via a later backup) and we are in
repair mode, the all-zero replacement chunk will be replaced by the correct chunk.
This requires reading of archive and file metadata, but not data.
- If we are in repair mode and we checked all the archives: delete orphaned
chunks from the repo.
- If you use a remote repo server via ssh:, the archive check is executed on
the client machine (because if encryption is enabled, the checks will require
decryption and this is always done client-side, because key access will be
required).
- The archive checks can be time consuming; they can be skipped using the
``--repository-only`` option.
The ``--max-duration`` option can be used to split a long-running repository check into multiple partial checks.
After the given number of seconds the check is interrupted. The next partial check will continue where the
previous one stopped, until the complete repository has been checked. Example: Assuming a full check took 7
hours, then running a daily check with --max-duration=3600 (1 hour) would result in one full check per week.
Attention: Partial checks perform far fewer checks than a full check (only the CRC32 checks on segment file
entries are done) and cannot be combined with ``--repair``. Partial checks may therefore be useful only with very
large repositories where a full check would take too long. Doing a full repository check aborts a partial check;
the next partial check will start from the beginning.
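For example, to spend at most one hour of checking per invocation (repository path is a placeholder)::

    borg check --max-duration 3600 /path/to/repo
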
The ``--verify-data`` option will perform a full integrity verification (as opposed to
checking the CRC32 of the segment) of data, which means reading the data from the
repository, decrypting and decompressing it. This is a cryptographic verification,
which will detect (accidental) corruption. For encrypted repositories it is
tamper-resistant as well, unless the attacker has access to the keys.
It is also very slow.
""")
subparser = subparsers.add_parser('check', parents=[common_parser], add_help=False,
description=self.do_check.__doc__,
epilog=check_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='verify repository')
subparser.set_defaults(func=self.do_check)
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
type=location_validator(),
help='repository or archive to check consistency of')
subparser.add_argument('--repository-only', dest='repo_only', action='store_true',
help='only perform repository checks')
subparser.add_argument('--archives-only', dest='archives_only', action='store_true',
help='only perform archives checks')
subparser.add_argument('--verify-data', dest='verify_data', action='store_true',
help='perform cryptographic archive data integrity verification '
'(conflicts with ``--repository-only``)')
subparser.add_argument('--repair', dest='repair', action='store_true',
help='attempt to repair any inconsistencies found')
subparser.add_argument('--save-space', dest='save_space', action='store_true',
help='work slower, but using less space')
subparser.add_argument('--max-duration', metavar='SECONDS', dest='max_duration',
type=int, default=0,
help='do only a partial repo check for max. SECONDS seconds (Default: unlimited)')
define_archive_filters_group(subparser)
# borg compact
compact_epilog = process_epilog("""
This command frees repository space by compacting segments.
Use this regularly to avoid running out of space - you do not need to use this
after each borg command though. It is especially useful after deleting archives,
because only compaction will really free repository space.
borg compact does not need a key, so it is possible to invoke it from the
client or also from the server.
Depending on the amount of segments that need compaction, it may take a while,
so consider using the ``--progress`` option.
A segment is compacted if the amount of saved space is above the percentage value
given by the ``--threshold`` option. If omitted, a threshold of 10% is used.
When using ``--verbose``, borg will output an estimate of the freed space.
After upgrading borg (server) to 1.2+, you can use ``borg compact --cleanup-commits``
to clean up the numerous 17-byte commit-only segments that borg 1.1 did not clean up
due to a bug. It is enough to do that once per repository.
See :ref:`separate_compaction` in Additional Notes for more details.
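For example, to compact using a lower 5% threshold, and to do the one-time commit cleanup
(repository path is a placeholder)::

    borg compact --threshold 5 /path/to/repo
    borg compact --cleanup-commits /path/to/repo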
""")
subparser = subparsers.add_parser('compact', parents=[common_parser], add_help=False,
description=self.do_compact.__doc__,
epilog=compact_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='compact segment files / free space in repo')
subparser.set_defaults(func=self.do_compact)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False),
help='repository to compact')
subparser.add_argument('--cleanup-commits', dest='cleanup_commits', action='store_true',
help='cleanup commit-only 17-byte segment files')
subparser.add_argument('--threshold', metavar='PERCENT', dest='threshold',
type=int, default=10,
help='set minimum threshold for saved space in PERCENT (Default: 10)')
# borg config
config_epilog = process_epilog("""
This command gets and sets options in a local repository or cache config file.
For security reasons, this command only works on local repositories.
To delete a config value entirely, use ``--delete``. To list the values
of the configuration file or the default values, use ``--list``. To get an existing
key's value, pass only the key name. To set a key, pass both the key name and
the new value. Keys can be specified in the format "section.name" or
simply "name"; the section will default to "repository" and "cache" for
the repo and cache configs, respectively.
By default, borg config manipulates the repository config file. Using ``--cache``
edits the repository cache's config file instead.
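For example (repository path is a placeholder; ``append_only`` is one of the existing
repository config keys)::

    borg config --list /path/to/repo
    borg config /path/to/repo append_only
    borg config /path/to/repo append_only 1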
""")
subparser = subparsers.add_parser('config', parents=[common_parser], add_help=False,
description=self.do_config.__doc__,
epilog=config_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='get and set configuration values')
subparser.set_defaults(func=self.do_config)
subparser.add_argument('-c', '--cache', dest='cache', action='store_true',
help='get and set values from the repo cache')
group = subparser.add_mutually_exclusive_group()
group.add_argument('-d', '--delete', dest='delete', action='store_true',
help='delete the key from the config file')
group.add_argument('-l', '--list', dest='list', action='store_true',
help='list the configuration of the repo')
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False, proto='file'),
help='repository to configure')
subparser.add_argument('name', metavar='NAME', nargs='?',
help='name of config key')
subparser.add_argument('value', metavar='VALUE', nargs='?',
help='new value for key')
# borg create
create_epilog = process_epilog("""
This command creates a backup archive containing all files found while recursively
traversing all paths specified. Paths are added to the archive as they are given,
that means if relative paths are desired, the command has to be run from the correct
directory.
When giving '-' as path, borg will read data from standard input and create a
file 'stdin' in the created archive from that data.
The archive will consume almost no disk space for files or parts of files that
have already been stored in other archives.
The archive name needs to be unique. It must not end in '.checkpoint' or
'.checkpoint.N' (with N being a number), because these names are used for
checkpoints and treated in special ways.
In the archive name, you may use the following placeholders:
{now}, {utcnow}, {fqdn}, {hostname}, {user} and some others.
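For example (repository path and input paths are placeholders)::

    borg create /path/to/repo::'{hostname}-{now}' ~/Documents ~/Photos
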
Backup speed is increased by not reprocessing files that are already part of
existing archives and weren't modified. The detection of unmodified files is
done by comparing multiple file metadata values with previous values kept in
the files cache.
This comparison can operate in different modes as given by ``--files-cache``:
- ctime,size,inode (default)
- mtime,size,inode (default behaviour of borg versions older than 1.1.0rc4)
- ctime,size (ignore the inode number)
- mtime,size (ignore the inode number)
- rechunk,ctime (all files are considered modified - rechunk, cache ctime)
- rechunk,mtime (all files are considered modified - rechunk, cache mtime)
- disabled (disable the files cache, all files considered modified - rechunk)
inode number: better safety, but often unstable on network filesystems
Normally, detecting file modifications will take inode information into
consideration to improve the reliability of file change detection.
This is problematic for files located on sshfs and similar network file
systems which do not provide stable inode numbers, such files will always
be considered modified. You can use modes without `inode` in this case to
improve performance, but reliability of change detection might be reduced.
ctime vs. mtime: safety vs. speed
- ctime is a rather safe way to detect changes to a file (metadata and contents)
as it cannot be set from userspace. However, a metadata-only change will already
update the ctime, so there might be some unnecessary chunking/hashing even
without content changes. Some filesystems do not support ctime (change time).
- mtime usually works and only updates if file contents were changed. But mtime
can be arbitrarily set from userspace, e.g. to set mtime back to the same value
it had before a content change happened. This can be done maliciously as well as
with good intentions, but in either case mtime-based cache modes can be problematic.
The mount points of filesystems or filesystem snapshots should be the same for every
creation of a new archive to ensure fast operation. This is because the files cache used
to quickly determine changed files relies on absolute filenames.
If this is not possible, consider creating a bind mount to a stable location.
The ``--progress`` option shows (from left to right) Original, Compressed and Deduplicated
(O, C and D, respectively), then the Number of files (N) processed so far, followed by
the currently processed path.
When using ``--stats``, you will get some statistics about how much data was
added - the "This Archive" deduplicated size there is most interesting as that is
how much your repository will grow. Please note that the "All archives" stats refer to
the state after creation. Also, the ``--stats`` and ``--dry-run`` options are mutually
exclusive because the data is not actually compressed and deduplicated during a dry run.
See the output of the "borg help patterns" command for more help on exclude patterns.
See the output of the "borg help placeholders" command for more help on placeholders.
.. man NOTES
The ``--exclude`` patterns are not like tar. In tar ``--exclude`` .bundler/gems will
exclude foo/.bundler/gems. In borg it will not; you need to use ``--exclude``
'\\*/.bundler/gems' to get the same effect. See ``borg help patterns`` for
more information.
In addition to using ``--exclude`` patterns, it is possible to use
``--exclude-if-present`` to specify the name of a filesystem object (e.g. a file
or folder name) which, when contained within another folder, will prevent the
containing folder from being backed up. By default, the containing folder and
all of its contents will be omitted from the backup. If, however, you wish to
only include the objects specified by ``--exclude-if-present`` in your backup,
and not include any other contents of the containing folder, this can be enabled
through using the ``--keep-exclude-tags`` option.
Item flags
++++++++++
``--list`` outputs a list of all files, directories and other
file system items it considered (no matter whether they had content changes
or not). For each item, it prefixes a single-letter flag that indicates type
and/or status of the item.
If you are interested only in a subset of that output, you can give e.g.
``--filter=AME`` and it will only show regular files with A, M or E status (see
below).
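For example, to log only added, modified and errored regular files during a backup
(repository path and input path are placeholders)::

    borg create --list --filter=AME /path/to/repo::'{hostname}-{now}' ~/src
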
An uppercase character represents the status of a regular file relative to the
"files" cache (not relative to the repo -- this is an issue if the files cache
is not used). Metadata is stored in any case and for 'A' and 'M' also new data
chunks are stored. For 'U' all data chunks refer to already existing chunks.
- 'A' = regular file, added (see also :ref:`a_status_oddity` in the FAQ)
- 'M' = regular file, modified
- 'U' = regular file, unchanged
- 'C' = regular file, it changed while we backed it up
- 'E' = regular file, an error happened while accessing/reading *this* file
A lowercase character means a file type other than a regular file,
borg usually just stores their metadata:
- 'd' = directory
- 'b' = block device
- 'c' = char device
- 'h' = regular file, hardlink (to already seen inodes)
- 's' = symlink
- 'f' = fifo
Other flags used include:
- 'i' = backup data was read from standard input (stdin)
- '-' = dry run, item was *not* backed up
- 'x' = excluded, item was *not* backed up
- '?' = missing status code (if you see this, please file a bug report!)
""")
subparser = subparsers.add_parser('create', parents=[common_parser], add_help=False,
description=self.do_create.__doc__,
epilog=create_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='create backup')
subparser.set_defaults(func=self.do_create)
dryrun_group = subparser.add_mutually_exclusive_group()
dryrun_group.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='do not create a backup archive')
dryrun_group.add_argument('-s', '--stats', dest='stats', action='store_true',
help='print statistics for the created archive')
subparser.add_argument('--list', dest='output_list', action='store_true',
help='output verbose list of items (files, dirs, ...)')
subparser.add_argument('--filter', metavar='STATUSCHARS', dest='output_filter',
help='only display items with the given status characters (see description)')
subparser.add_argument('--json', action='store_true',
help='output stats as JSON. Implies ``--stats``.')
subparser.add_argument('--no-cache-sync', dest='no_cache_sync', action='store_true',
help='experimental: do not synchronize the cache. Implies not using the files cache.')
subparser.add_argument('--stdin-name', metavar='NAME', dest='stdin_name', default='stdin',
help='use NAME in archive for stdin data (default: "stdin")')
exclude_group = define_exclusion_group(subparser, tag_files=True)
exclude_group.add_argument('--exclude-nodump', dest='exclude_nodump', action='store_true',
help='exclude files flagged NODUMP')
fs_group = subparser.add_argument_group('Filesystem options')
fs_group.add_argument('-x', '--one-file-system', dest='one_file_system', action='store_true',
help='stay in the same file system and do not store mount points of other file systems')
fs_group.add_argument('--numeric-owner', dest='numeric_owner', action='store_true',
help='only store numeric user and group identifiers')
# --noatime is the default now and the flag is deprecated. args.noatime is not used any more.
# Use --atime if you want to store the atime (default behaviour before borg 1.2.0a7).
fs_group.add_argument('--noatime', dest='noatime', action='store_true',
help='do not store atime into archive')
fs_group.add_argument('--atime', dest='atime', action='store_true',
help='do store atime into archive')
fs_group.add_argument('--noctime', dest='noctime', action='store_true',
help='do not store ctime into archive')
fs_group.add_argument('--nobirthtime', dest='nobirthtime', action='store_true',
help='do not store birthtime (creation date) into archive')
fs_group.add_argument('--nobsdflags', dest='nobsdflags', action='store_true',
help='do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive')
fs_group.add_argument('--files-cache', metavar='MODE', dest='files_cache_mode',
type=FilesCacheMode, default=DEFAULT_FILES_CACHE_MODE_UI,
help='operate files cache in MODE. default: %s' % DEFAULT_FILES_CACHE_MODE_UI)
fs_group.add_argument('--read-special', dest='read_special', action='store_true',
help='open and read block and char device files as well as FIFOs as if they were '
'regular files. Also follows symlinks pointing to these kinds of files.')
archive_group = subparser.add_argument_group('Archive options')
archive_group.add_argument('--comment', dest='comment', metavar='COMMENT', type=CommentSpec, default='',
help='add a comment text to the archive')
archive_group.add_argument('--timestamp', metavar='TIMESTAMP', dest='timestamp',
type=timestamp, default=None,
help='manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). '
'Alternatively, give a reference file/directory.')
archive_group.add_argument('-c', '--checkpoint-interval', metavar='SECONDS', dest='checkpoint_interval',
type=int, default=1800,
help='write checkpoint every SECONDS seconds (Default: 1800)')
archive_group.add_argument('--chunker-params', metavar='PARAMS', dest='chunker_params',
type=ChunkerParams, default=CHUNKER_PARAMS,
help='specify the chunker parameters (ALGO, CHUNK_MIN_EXP, CHUNK_MAX_EXP, '
'HASH_MASK_BITS, HASH_WINDOW_SIZE). default: %s,%d,%d,%d,%d' % CHUNKER_PARAMS)
archive_group.add_argument('-C', '--compression', metavar='COMPRESSION', dest='compression',
type=CompressionSpec, default=CompressionSpec('lz4'),
help='select compression algorithm, see the output of the '
'"borg help compression" command for details.')
subparser.add_argument('location', metavar='ARCHIVE',
type=location_validator(archive=True),
help='name of archive to create (must also be a valid directory name)')
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths to archive')
# borg debug
debug_epilog = process_epilog("""
These commands are not intended for normal use and are potentially very
dangerous if used incorrectly.
They exist to improve debugging capabilities without direct system access, e.g.
in case you ever run into some severe malfunction. Use them only if you know
what you are doing or if a trusted developer tells you what to do.""")
subparser = subparsers.add_parser('debug', parents=[mid_common_parser], add_help=False,
description='debugging command (not intended for normal use)',
epilog=debug_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='debugging command (not intended for normal use)')
debug_parsers = subparser.add_subparsers(title='required arguments', metavar='<command>')
subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser))
debug_info_epilog = process_epilog("""
This command displays some system information that might be useful for bug
reports and debugging problems. If a traceback happens, this information is
already appended at the end of the traceback.
""")
subparser = debug_parsers.add_parser('info', parents=[common_parser], add_help=False,
description=self.do_debug_info.__doc__,
epilog=debug_info_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='show system infos for debugging / bug reports (debug)')
subparser.set_defaults(func=self.do_debug_info)
debug_dump_archive_items_epilog = process_epilog("""
This command dumps raw (but decrypted and decompressed) archive items (only metadata) to files.
""")
subparser = debug_parsers.add_parser('dump-archive-items', parents=[common_parser], add_help=False,
description=self.do_debug_dump_archive_items.__doc__,
epilog=debug_dump_archive_items_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='dump archive items (metadata) (debug)')
subparser.set_defaults(func=self.do_debug_dump_archive_items)
subparser.add_argument('location', metavar='ARCHIVE',
type=location_validator(archive=True),
help='archive to dump')
debug_dump_archive_epilog = process_epilog("""
This command dumps all metadata of an archive in a decoded form to a file.
""")
subparser = debug_parsers.add_parser('dump-archive', parents=[common_parser], add_help=False,
description=self.do_debug_dump_archive.__doc__,
epilog=debug_dump_archive_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='dump decoded archive metadata (debug)')
subparser.set_defaults(func=self.do_debug_dump_archive)
subparser.add_argument('location', metavar='ARCHIVE',
type=location_validator(archive=True),
help='archive to dump')
subparser.add_argument('path', metavar='PATH', type=str,
help='file to dump data into')
debug_dump_manifest_epilog = process_epilog("""
This command dumps manifest metadata of a repository in a decoded form to a file.
""")
subparser = debug_parsers.add_parser('dump-manifest', parents=[common_parser], add_help=False,
description=self.do_debug_dump_manifest.__doc__,
epilog=debug_dump_manifest_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='dump decoded repository metadata (debug)')
subparser.set_defaults(func=self.do_debug_dump_manifest)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to dump')
subparser.add_argument('path', metavar='PATH', type=str,
help='file to dump data into')
debug_dump_repo_objs_epilog = process_epilog("""
This command dumps raw (but decrypted and decompressed) repo objects to files.
""")
subparser = debug_parsers.add_parser('dump-repo-objs', parents=[common_parser], add_help=False,
description=self.do_debug_dump_repo_objs.__doc__,
epilog=debug_dump_repo_objs_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='dump repo objects (debug)')
subparser.set_defaults(func=self.do_debug_dump_repo_objs)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to dump')
subparser.add_argument('--ghost', dest='ghost', action='store_true',
help='dump all segment file contents, including deleted/uncommitted objects and commits.')
debug_search_repo_objs_epilog = process_epilog("""
This command searches raw (but decrypted and decompressed) repo objects for a specific bytes sequence.
""")
subparser = debug_parsers.add_parser('search-repo-objs', parents=[common_parser], add_help=False,
description=self.do_debug_search_repo_objs.__doc__,
epilog=debug_search_repo_objs_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='search repo objects (debug)')
subparser.set_defaults(func=self.do_debug_search_repo_objs)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to search')
subparser.add_argument('wanted', metavar='WANTED', type=str,
help='term to search the repo for, either 0x1234abcd hex term or a string')
debug_get_obj_epilog = process_epilog("""
This command gets an object from the repository.
""")
subparser = debug_parsers.add_parser('get-obj', parents=[common_parser], add_help=False,
description=self.do_debug_get_obj.__doc__,
epilog=debug_get_obj_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='get object from repository (debug)')
subparser.set_defaults(func=self.do_debug_get_obj)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to use')
subparser.add_argument('id', metavar='ID', type=str,
help='hex object ID to get from the repo')
subparser.add_argument('path', metavar='PATH', type=str,
help='file to write object data into')
debug_put_obj_epilog = process_epilog("""
This command puts objects into the repository.
""")
subparser = debug_parsers.add_parser('put-obj', parents=[common_parser], add_help=False,
description=self.do_debug_put_obj.__doc__,
epilog=debug_put_obj_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='put object to repository (debug)')
subparser.set_defaults(func=self.do_debug_put_obj)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to use')
subparser.add_argument('paths', metavar='PATH', nargs='+', type=str,
help='file(s) to read and create object(s) from')
debug_delete_obj_epilog = process_epilog("""
This command deletes objects from the repository.
""")
subparser = debug_parsers.add_parser('delete-obj', parents=[common_parser], add_help=False,
description=self.do_debug_delete_obj.__doc__,
epilog=debug_delete_obj_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='delete object from repository (debug)')
subparser.set_defaults(func=self.do_debug_delete_obj)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to use')
subparser.add_argument('ids', metavar='IDs', nargs='+', type=str,
help='hex object ID(s) to delete from the repo')
debug_refcount_obj_epilog = process_epilog("""
This command displays the reference count for objects from the repository.
""")
subparser = debug_parsers.add_parser('refcount-obj', parents=[common_parser], add_help=False,
description=self.do_debug_refcount_obj.__doc__,
epilog=debug_refcount_obj_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='show refcount for object from repository (debug)')
subparser.set_defaults(func=self.do_debug_refcount_obj)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to use')
subparser.add_argument('ids', metavar='IDs', nargs='+', type=str,
help='hex object ID(s) to show refcounts for')
debug_convert_profile_epilog = process_epilog("""
Convert a Borg profile to a Python cProfile compatible profile.
""")
subparser = debug_parsers.add_parser('convert-profile', parents=[common_parser], add_help=False,
description=self.do_debug_convert_profile.__doc__,
epilog=debug_convert_profile_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='convert Borg profile to Python profile (debug)')
subparser.set_defaults(func=self.do_debug_convert_profile)
subparser.add_argument('input', metavar='INPUT', type=argparse.FileType('rb'),
help='Borg profile')
subparser.add_argument('output', metavar='OUTPUT', type=argparse.FileType('wb'),
help='Output file')
# borg delete
delete_epilog = process_epilog("""
This command deletes an archive from the repository or the complete repository.
Important: When deleting archives, repository disk space is **not** freed until
you run ``borg compact``.
If you delete the complete repository, the local cache for it (if any) is
also deleted. Alternatively, you can delete just the local cache with the
``--cache-only`` option.
When using ``--stats``, you will get some statistics about how much data was
deleted - the "Deleted data" deduplicated size there is most interesting as
that is how much your repository will shrink.
Please note that the "All archives" stats refer to the state after deletion.
You can delete multiple archives by specifying their common prefix, if they
have one, using the ``--prefix PREFIX`` option. You can also specify a shell
pattern to match multiple archives using the ``--glob-archives GLOB`` option
(for more info on these patterns, see ``borg help patterns``). Note that these
two options are mutually exclusive.
To avoid accidentally deleting archives, especially when using glob patterns,
it might be helpful to use the ``--dry-run`` option to test out the command without
actually making any changes to the repository.
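For example, to preview and then delete all archives matching a glob (repository path
and glob are placeholders)::

    borg delete --dry-run --glob-archives 'tmp-*' /path/to/repo
    borg delete --glob-archives 'tmp-*' /path/to/repo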
""")
subparser = subparsers.add_parser('delete', parents=[common_parser], add_help=False,
description=self.do_delete.__doc__,
epilog=delete_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='delete archive')
subparser.set_defaults(func=self.do_delete)
subparser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='do not change repository')
subparser.add_argument('-s', '--stats', dest='stats', action='store_true',
help='print statistics for the deleted archive')
subparser.add_argument('--cache-only', dest='cache_only', action='store_true',
help='delete only the local cache for the given repository')
subparser.add_argument('--force', dest='forced',
action='count', default=0,
help='force deletion of corrupted archives, '
'use ``--force --force`` in case ``--force`` does not work.')
subparser.add_argument('--save-space', dest='save_space', action='store_true',
help='work slower, but using less space')
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
type=location_validator(),
help='repository or archive to delete')
subparser.add_argument('archives', metavar='ARCHIVE', nargs='*',
help='archives to delete')
define_archive_filters_group(subparser)
# borg diff
diff_epilog = process_epilog("""
This command finds differences (file contents, user/group/mode) between archives.
A repository location and an archive name must be specified for REPO::ARCHIVE1.
ARCHIVE2 is just another archive name in the same repository (no repository location
allowed).
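For example (repository path and archive names are placeholders)::

    borg diff /path/to/repo::archive-monday archive-tuesday
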
For archives created with Borg 1.1 or newer, diff automatically detects whether
the archives were created with the same chunker params. If so, only chunk IDs
are compared, which is very fast.
For archives prior to Borg 1.1, chunk contents are compared by default.
If you know that the archives were created with the same chunker params,
pass ``--same-chunker-params`` to enable the faster chunk ID comparison.
Note that the chunker params changed from Borg 0.xx to 1.0.
See the output of the "borg help patterns" command for more help on exclude patterns.
""")
subparser = subparsers.add_parser('diff', parents=[common_parser], add_help=False,
description=self.do_diff.__doc__,
epilog=diff_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='find differences in archive contents')
subparser.set_defaults(func=self.do_diff)
subparser.add_argument('--numeric-owner', dest='numeric_owner', action='store_true',
help='only consider numeric user and group identifiers')
subparser.add_argument('--same-chunker-params', dest='same_chunker_params', action='store_true',
help='Override check of chunker parameters.')
subparser.add_argument('--sort', dest='sort', action='store_true',
help='Sort the output lines by file path.')
subparser.add_argument('location', metavar='REPO::ARCHIVE1',
type=location_validator(archive=True),
help='repository location and ARCHIVE1 name')
subparser.add_argument('archive2', metavar='ARCHIVE2',
type=archivename_validator(),
help='ARCHIVE2 name (no repository location allowed)')
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths of items inside the archives to compare; patterns are supported')
define_exclusion_group(subparser)
# borg export-tar
export_tar_epilog = process_epilog("""
This command creates a tarball from an archive.
When giving '-' as the output FILE, Borg will write a tar stream to standard output.
By default (``--tar-filter=auto``) Borg will detect whether the FILE should be compressed
based on its file extension and pipe the tarball through an appropriate filter
before writing it to FILE:
- .tar.gz: gzip
- .tar.bz2: bzip2
- .tar.xz: xz
Alternatively a ``--tar-filter`` program may be explicitly specified. It should
read the uncompressed tar stream from stdin and write a compressed/filtered
tar stream to stdout.
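For example, relying on automatic filter detection or naming a filter program explicitly
(repository path, archive name and output file are placeholders)::

    borg export-tar /path/to/repo::my-archive backup.tar.gz
    borg export-tar --tar-filter='gzip -9' /path/to/repo::my-archive backup.tar.gz
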
The generated tarball uses the GNU tar format.
export-tar is a lossy conversion:
BSD flags, ACLs, extended attributes (xattrs), atime and ctime are not exported.
Timestamp resolution is limited to whole seconds, not the nanosecond resolution
otherwise supported by Borg.
A ``--sparse`` option (as found in borg extract) is not supported.
By default the entire archive is extracted but a subset of files and directories
can be selected by passing a list of ``PATHs`` as arguments.
The file selection can further be restricted by using the ``--exclude`` option.
See the output of the "borg help patterns" command for more help on exclude patterns.
``--progress`` can be slower than no progress display, since it makes one additional
pass over the archive metadata.
""")
subparser = subparsers.add_parser('export-tar', parents=[common_parser], add_help=False,
description=self.do_export_tar.__doc__,
epilog=export_tar_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='create tarball from archive')
subparser.set_defaults(func=self.do_export_tar)
subparser.add_argument('--tar-filter', dest='tar_filter', default='auto',
help='filter program to pipe data through')
subparser.add_argument('--list', dest='output_list', action='store_true',
help='output verbose list of items (files, dirs, ...)')
subparser.add_argument('location', metavar='ARCHIVE',
type=location_validator(archive=True),
help='archive to export')
subparser.add_argument('tarfile', metavar='FILE',
help='output tar file. "-" to write to stdout instead.')
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths to extract; patterns are supported')
define_exclusion_group(subparser, strip_components=True)
# borg extract
extract_epilog = process_epilog("""
This command extracts the contents of an archive. By default the entire
archive is extracted but a subset of files and directories can be selected
by passing a list of ``PATHs`` as arguments. The file selection can further
be restricted by using the ``--exclude`` option.
See the output of the "borg help patterns" command for more help on exclude patterns.
By using ``--dry-run``, you can do all extraction steps except actually writing the
output data: reading metadata and data chunks from the repo, checking the hash/hmac,
decrypting, decompressing.
``--progress`` can be slower than no progress display, since it makes one additional
pass over the archive metadata.
.. note::
Currently, extract always writes into the current working directory ("."),
so make sure you ``cd`` to the right place before calling ``borg extract``.
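For example, to extract a whole archive or only a subtree into the current directory
(repository path, archive name and subtree are placeholders)::

    borg extract /path/to/repo::my-archive
    borg extract /path/to/repo::my-archive home/user/Documents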
""")
subparser = subparsers.add_parser('extract', parents=[common_parser], add_help=False,
description=self.do_extract.__doc__,
epilog=extract_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='extract archive contents')
subparser.set_defaults(func=self.do_extract)
subparser.add_argument('--list', dest='output_list', action='store_true',
help='output verbose list of items (files, dirs, ...)')
subparser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='do not actually change any files')
subparser.add_argument('--numeric-owner', dest='numeric_owner', action='store_true',
help='only obey numeric user and group identifiers')
subparser.add_argument('--nobsdflags', dest='nobsdflags', action='store_true',
help='do not extract/set bsdflags (e.g. NODUMP, IMMUTABLE)')
subparser.add_argument('--stdout', dest='stdout', action='store_true',
help='write all extracted data to stdout')
subparser.add_argument('--sparse', dest='sparse', action='store_true',
help='create holes in output sparse file from all-zero chunks')
subparser.add_argument('location', metavar='ARCHIVE',
type=location_validator(archive=True),
help='archive to extract')
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths to extract; patterns are supported')
define_exclusion_group(subparser, strip_components=True)
# borg help
subparser = subparsers.add_parser('help', parents=[common_parser], add_help=False,
description='Extra help')
subparser.add_argument('--epilog-only', dest='epilog_only', action='store_true')
subparser.add_argument('--usage-only', dest='usage_only', action='store_true')
subparser.set_defaults(func=functools.partial(self.do_help, parser, subparsers.choices))
subparser.add_argument('topic', metavar='TOPIC', type=str, nargs='?',
help='additional help on TOPIC')
# borg info
info_epilog = process_epilog("""
This command displays detailed information about the specified archive or repository.
Please note that the deduplicated sizes of the individual archives do not add
up to the deduplicated size of the repository ("all archives"), because the two
mean different things:
This archive / deduplicated size = amount of data stored ONLY for this archive
= unique chunks of this archive.
All archives / deduplicated size = amount of data stored in the repo
= all chunks in the repository.
Borg archives can only contain a limited amount of file metadata.
The size of an archive relative to this limit depends on a number of factors,
mainly the number of files, the lengths of paths and other metadata stored for files.
This is shown as *utilization of maximum supported archive size*.
""")
subparser = subparsers.add_parser('info', parents=[common_parser], add_help=False,
description=self.do_info.__doc__,
epilog=info_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='show repository or archive information')
subparser.set_defaults(func=self.do_info)
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
type=location_validator(),
help='repository or archive to display information about')
subparser.add_argument('--json', action='store_true',
help='format output as JSON')
define_archive_filters_group(subparser)
# borg init
init_epilog = process_epilog("""
This command initializes an empty repository. A repository is a filesystem
directory containing the deduplicated data from zero or more archives.
Encryption can be enabled at repository init time. It cannot be changed later.
It is not recommended to work without encryption. Repository encryption protects
you e.g. against the case that an attacker has access to your backup repository.
But be careful with the key / the passphrase:
If you want "passphrase-only" security, use one of the repokey modes. The
key will be stored inside the repository (in its "config" file). In the above-mentioned
attack scenario, the attacker will have the key (but not the
passphrase).
If you want "passphrase and having-the-key" security, use one of the keyfile
modes. The key will be stored in your home directory (in .config/borg/keys).
In the attack scenario, the attacker who only has access to your repo won't
have the key (and also not the passphrase).
Make a backup copy of the key file (keyfile mode) or repo config file
(repokey mode) and keep it at a safe place, so you still have the key in
case it gets corrupted or lost. Also keep the passphrase at a safe place.
The backup that is encrypted with that key won't help you with that, of course.
Make sure you use a good passphrase. Not too short, not too simple. The real
encryption / decryption key is encrypted with / locked by your passphrase.
If an attacker gets your key, they can't unlock and use it without knowing the
passphrase.
Be careful with special or non-ascii characters in your passphrase:
- Borg processes the passphrase as unicode (and encodes it as utf-8),
so it does not have problems dealing with even the strangest characters.
- BUT: that does not necessarily apply to your OS / VM / keyboard configuration.
So better use a long passphrase made from simple ascii chars than one that
includes non-ascii stuff or characters that are hard/impossible to enter on
a different keyboard layout.
You can change your passphrase for existing repos at any time, it won't affect
the encryption/decryption key or other secrets.
Encryption modes
++++++++++++++++
.. nanorst: inline-fill
+----------+---------------+------------------------+--------------------------+
| Hash/MAC | Not encrypted | Not encrypted, | Encrypted (AEAD w/ AES) |
| | no auth | but authenticated | and authenticated |
+----------+---------------+------------------------+--------------------------+
| SHA-256 | none | `authenticated` | repokey |
| | | | keyfile |
+----------+---------------+------------------------+--------------------------+
| BLAKE2b | n/a | `authenticated-blake2` | `repokey-blake2` |
| | | | `keyfile-blake2` |
+----------+---------------+------------------------+--------------------------+
.. nanorst: inline-replace
`Marked modes` are new in Borg 1.1 and are not backwards-compatible with Borg 1.0.x.
On modern Intel/AMD CPUs (except very cheap ones), AES is usually
hardware-accelerated.
BLAKE2b is faster than SHA256 on Intel/AMD 64-bit CPUs
(except AMD Ryzen and future CPUs with SHA extensions),
which makes `authenticated-blake2` faster than `none` and `authenticated`.
On modern ARM CPUs, NEON provides hardware acceleration for SHA256 making it faster
than BLAKE2b-256 there. NEON accelerates AES as well.
Hardware acceleration is always used automatically when available.
`repokey` and `keyfile` use AES-CTR-256 for encryption and HMAC-SHA256 for
authentication in an encrypt-then-MAC (EtM) construction. The chunk ID hash
is HMAC-SHA256 as well (with a separate key).
These modes are compatible with Borg 1.0.x.
`repokey-blake2` and `keyfile-blake2` are also authenticated encryption modes,
but use BLAKE2b-256 instead of HMAC-SHA256 for authentication. The chunk ID
hash is a keyed BLAKE2b-256 hash.
These modes are new and *not* compatible with Borg 1.0.x.
`authenticated` mode uses no encryption, but authenticates repository contents
through the same HMAC-SHA256 hash as the `repokey` and `keyfile` modes (it uses it
as the chunk ID hash). The key is stored like `repokey`.
This mode is new and *not* compatible with Borg 1.0.x.
`authenticated-blake2` is like `authenticated`, but uses the keyed BLAKE2b-256 hash
from the other blake2 modes.
This mode is new and *not* compatible with Borg 1.0.x.
`none` mode uses no encryption and no authentication. It uses SHA256 as chunk
ID hash. Not recommended, rather consider using an authenticated or
authenticated/encrypted mode. This mode has possible denial-of-service issues
when running ``borg create`` on contents controlled by an attacker.
Use it only for new repositories where no encryption is wanted **and** when compatibility
with 1.0.x is important. If compatibility with 1.0.x is not important, use
`authenticated-blake2` or `authenticated` instead.
This mode is compatible with Borg 1.0.x.
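For example, to create a new encrypted repository (repository path is a placeholder)::

    borg init --encryption=repokey-blake2 /path/to/repo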
""")
subparser = subparsers.add_parser('init', parents=[common_parser], add_help=False,
description=self.do_init.__doc__, epilog=init_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='initialize empty repository')
subparser.set_defaults(func=self.do_init)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False),
help='repository to create')
subparser.add_argument('-e', '--encryption', metavar='MODE', dest='encryption', required=True,
choices=key_argument_names(),
help='select encryption key mode **(required)**')
subparser.add_argument('--append-only', dest='append_only', action='store_true',
help='create an append-only mode repository. Note that this only affects '
'the low level structure of the repository, and running `delete` '
'or `prune` will still be allowed. See :ref:`append_only_mode` in '
'Additional Notes for more details.')
subparser.add_argument('--storage-quota', metavar='QUOTA', dest='storage_quota', default=None,
type=parse_storage_quota,
help='Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota.')
subparser.add_argument('--make-parent-dirs', dest='make_parent_dirs', action='store_true',
help='create the parent directories of the repository directory, if they are missing.')
# borg key
subparser = subparsers.add_parser('key', parents=[mid_common_parser], add_help=False,
description="Manage a keyfile or repokey of a repository",
epilog="",
formatter_class=argparse.RawDescriptionHelpFormatter,
help='manage repository key')
key_parsers = subparser.add_subparsers(title='required arguments', metavar='<command>')
subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser))
key_export_epilog = process_epilog("""
If repository encryption is used, the repository is inaccessible
without the key. This command allows one to back up this essential key.
Note that the backup produced does not include the passphrase itself
(i.e. the exported key stays encrypted). In order to regain access to a
repository, one needs both the exported key and the original passphrase.
There are three backup formats. The normal backup format is suitable for
digital storage as a file. The ``--paper`` backup format is optimized
for printing and typing in while importing, with per line checks to
reduce problems with manual input. The ``--qr-html`` option creates a printable
HTML template with a QR code and a copy of the ``--paper``-formatted key.
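For example (repository path and output paths are placeholders)::

    borg key export /path/to/repo encrypted-key-backup
    borg key export --paper /path/to/repo encrypted-key-backup.txt
    borg key export --qr-html /path/to/repo encrypted-key-backup.html
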
For repositories using keyfile encryption the key is saved locally
on the system that is capable of doing backups. To guard against loss
of this key, the key needs to be backed up independently of the main
data backup.
For repositories using the repokey encryption the key is saved in the
repository in the config file. A backup is thus not strictly needed,
but guards against the repository becoming inaccessible if the file
is damaged for some reason.
""")
subparser = key_parsers.add_parser('export', parents=[common_parser], add_help=False,
description=self.do_key_export.__doc__,
epilog=key_export_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='export repository key for backup')
subparser.set_defaults(func=self.do_key_export)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False))
subparser.add_argument('path', metavar='PATH', nargs='?', type=str,
help='where to store the backup')
subparser.add_argument('--paper', dest='paper', action='store_true',
help='Create an export suitable for printing and later type-in')
subparser.add_argument('--qr-html', dest='qr', action='store_true',
help='Create an html file suitable for printing and later type-in or qr scan')
key_import_epilog = process_epilog("""
This command restores a key previously backed up with the export command.
If the ``--paper`` option is given, the import will be an interactive
process in which each line is checked for plausibility before
proceeding to the next line. For this format PATH must not be given.
""")
subparser = key_parsers.add_parser('import', parents=[common_parser], add_help=False,
description=self.do_key_import.__doc__,
epilog=key_import_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='import repository key from backup')
subparser.set_defaults(func=self.do_key_import)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False))
subparser.add_argument('path', metavar='PATH', nargs='?', type=str,
help='path to the backup (\'-\' to read from stdin)')
subparser.add_argument('--paper', dest='paper', action='store_true',
help='interactively import from a backup done with ``--paper``')
change_passphrase_epilog = process_epilog("""
The key files used for repository encryption are optionally passphrase
protected. This command can be used to change this passphrase.
Please note that this command only changes the passphrase, but not any
secret protected by it (like e.g. encryption/MAC keys or chunker seed).
Thus, changing the passphrase after the passphrase and borg key have been compromised
does not protect future (or past) backups to the same repository.
""")
subparser = key_parsers.add_parser('change-passphrase', parents=[common_parser], add_help=False,
description=self.do_change_passphrase.__doc__,
epilog=change_passphrase_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='change repository passphrase')
subparser.set_defaults(func=self.do_change_passphrase)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False))
migrate_to_repokey_epilog = process_epilog("""
This command migrates a repository from passphrase mode (removed in Borg 1.0)
to repokey mode.
You will be first asked for the repository passphrase (to open it in passphrase
mode). This is the same passphrase as you used to use for this repo before 1.0.
It will then derive the different secrets from this passphrase.
Then you will be asked for a new passphrase (twice, for safety). This
passphrase will be used to protect the repokey (which contains these same
secrets in encrypted form). You may use the same passphrase as you used to
use, but you may also use a different one.
After migrating to repokey mode, you can change the passphrase at any time.
But please note: the secrets will always stay the same and they could always
be derived from your (old) passphrase-mode passphrase.
""")
subparser = key_parsers.add_parser('migrate-to-repokey', parents=[common_parser], add_help=False,
description=self.do_migrate_to_repokey.__doc__,
epilog=migrate_to_repokey_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='migrate passphrase-mode repository to repokey')
subparser.set_defaults(func=self.do_migrate_to_repokey)
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False))
# borg list
list_epilog = process_epilog("""
This command lists the contents of a repository or an archive.
See the "borg help patterns" command for more help on exclude patterns.
.. man NOTES
The following keys are available for ``--format``:
""") + BaseFormatter.keys_help() + textwrap.dedent("""
Keys for listing repository archives:
""") + ArchiveFormatter.keys_help() + textwrap.dedent("""
Keys for listing archive files:
""") + ItemFormatter.keys_help()
subparser = subparsers.add_parser('list', parents=[common_parser], add_help=False,
description=self.do_list.__doc__,
epilog=list_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='list archive or repository contents')
subparser.set_defaults(func=self.do_list)
subparser.add_argument('--short', dest='short', action='store_true',
help='only print file/directory names, nothing else')
subparser.add_argument('--format', '--list-format', metavar='FORMAT', dest='format',
help='specify format for file listing '
'(default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}")')
subparser.add_argument('--json', action='store_true',
help='Only valid for listing repository contents. Format output as JSON. '
'The form of ``--format`` is ignored, '
'but keys used in it are added to the JSON output. '
'Some keys are always present. Note: JSON can only represent text. '
'A "barchive" key is therefore not available.')
subparser.add_argument('--json-lines', action='store_true',
help='Only valid for listing archive contents. Format output as JSON Lines. '
'The form of ``--format`` is ignored, '
'but keys used in it are added to the JSON output. '
'Some keys are always present. Note: JSON can only represent text. '
'A "bpath" key is therefore not available.')
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
type=location_validator(),
help='repository or archive to list contents of')
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths to list; patterns are supported')
define_archive_filters_group(subparser)
define_exclusion_group(subparser)
subparser = subparsers.add_parser('mount', parents=[common_parser], add_help=False,
description=self.do_mount.__doc__,
epilog=mount_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='mount repository')
define_borg_mount(subparser)
# borg prune
prune_epilog = process_epilog("""
The prune command prunes a repository by deleting all archives not matching
any of the specified retention options.
Important: Repository disk space is **not** freed until you run ``borg compact``.
This command is normally used by automated backup scripts wanting to keep a
certain number of historic backups.
Also, prune automatically removes checkpoint archives (incomplete archives left
behind by interrupted backup runs) except if the checkpoint is the latest
archive (and thus still needed). Checkpoint archives are not considered when
comparing archive counts against the retention limits (``--keep-X``).
If a prefix is set with -P, then only archives that start with the prefix are
considered for deletion and only those archives count towards the totals
specified by the rules.
Otherwise, *all* archives in the repository are candidates for deletion!
There is no automatic distinction between archives representing different
contents. These need to be distinguished by specifying matching prefixes.
If you have multiple sequences of archives with different data sets (e.g.
from different machines) in one shared repository, use one prune call per
data set that matches only the respective archives using the -P option.
The ``--keep-within`` option takes an argument of the form "<int><char>",
where char is "H", "d", "w", "m", "y". For example, ``--keep-within 2d`` means
to keep all archives that were created within the past 48 hours.
"1m" is taken to mean "31d". The archives kept with this option do not
count towards the totals specified by any other options.
A good procedure is to thin out more and more the older your backups get.
As an example, ``--keep-daily 7`` means to keep the latest backup on each day,
up to 7 most recent days with backups (days without backups do not count).
The rules are applied from secondly to yearly, and backups selected by previous
rules do not count towards those of later rules. The time that each backup
starts is used for pruning purposes. Dates and times are interpreted in
the local timezone, and weeks go from Monday to Sunday. Specifying a
negative number of archives to keep means that there is no limit.
The ``--keep-last N`` option is doing the same as ``--keep-secondly N`` (and it will
keep the last N archives under the assumption that you do not create more than one
backup archive in the same second).
When using ``--stats``, you will get some statistics about how much data was
deleted - the "Deleted data" deduplicated size there is most interesting as
that is how much your repository will shrink.
Please note that the "All archives" stats refer to the state after pruning.
""")
subparser = subparsers.add_parser('prune', parents=[common_parser], add_help=False,
description=self.do_prune.__doc__,
epilog=prune_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='prune archives')
subparser.set_defaults(func=self.do_prune)
subparser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='do not change repository')
subparser.add_argument('--force', dest='forced', action='store_true',
help='force pruning of corrupted archives')
subparser.add_argument('-s', '--stats', dest='stats', action='store_true',
help='print statistics for the deleted archive')
subparser.add_argument('--list', dest='output_list', action='store_true',
help='output verbose list of archives it keeps/prunes')
subparser.add_argument('--keep-within', metavar='INTERVAL', dest='within', type=interval,
help='keep all archives within this time interval')
subparser.add_argument('--keep-last', '--keep-secondly', dest='secondly', type=int, default=0,
help='number of secondly archives to keep')
subparser.add_argument('--keep-minutely', dest='minutely', type=int, default=0,
help='number of minutely archives to keep')
subparser.add_argument('-H', '--keep-hourly', dest='hourly', type=int, default=0,
help='number of hourly archives to keep')
subparser.add_argument('-d', '--keep-daily', dest='daily', type=int, default=0,
help='number of daily archives to keep')
subparser.add_argument('-w', '--keep-weekly', dest='weekly', type=int, default=0,
help='number of weekly archives to keep')
subparser.add_argument('-m', '--keep-monthly', dest='monthly', type=int, default=0,
help='number of monthly archives to keep')
subparser.add_argument('-y', '--keep-yearly', dest='yearly', type=int, default=0,
help='number of yearly archives to keep')
define_archive_filters_group(subparser, sort_by=False, first_last=False)
subparser.add_argument('--save-space', dest='save_space', action='store_true',
help='work slower, but using less space')
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False),
help='repository to prune')
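# Illustrative example (not from the original source): a typical retention policy
# built from the options defined above; REPO is a placeholder repository path.
#
#   borg prune --list --keep-daily 7 --keep-weekly 4 --keep-monthly 6 REPO
#
# With -P/--prefix set, only archives starting with that prefix are considered.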
# borg recreate
recreate_epilog = process_epilog("""
Recreate the contents of existing archives.
This is an *experimental* feature. Do *not* use this on your only backup.
Important: Repository disk space is **not** freed until you run ``borg compact``.
``--exclude``, ``--exclude-from``, ``--exclude-if-present``, ``--keep-exclude-tags``, and PATH
have the exact same semantics as in "borg create". If PATHs are specified the
resulting archive will only contain files from these PATHs.
Note that all paths in an archive are relative, therefore absolute patterns/paths
will *not* match (``--exclude``, ``--exclude-from``, PATHs).
``--recompress`` allows one to change the compression of existing data in archives.
Due to how Borg stores compressed size information this might display
incorrect information for archives that were not recreated at the same time.
There is no risk of data loss by this.
``--chunker-params`` will re-chunk all files in the archive, this can be
used to have upgraded Borg 0.xx or Attic archives deduplicate with
Borg 1.x archives.
**USE WITH CAUTION.**
Depending on the PATHs and patterns given, recreate can be used to permanently
delete files from archives.
When in doubt, use ``--dry-run --verbose --list`` to see how patterns/PATHS are
interpreted.
The archive being recreated is only removed after the operation completes. The
archive that is built during the operation exists at the same time at
"<ARCHIVE>.recreate". The new archive will have a different archive ID.
With ``--target`` the original archive is not replaced, instead a new archive is created.
When rechunking (or recompressing), space usage can be substantial - expect
at least the entire deduplicated size of the archives using the previous
chunker (or compression) params.
If you recently ran borg check --repair and it had to fix lost chunks with all-zero
replacement chunks, please first run another backup for the same data and re-run
borg check --repair afterwards to heal any archives that had lost chunks which are
still generated from the input data.
Important: running borg recreate to re-chunk will remove the chunks_healthy
metadata of all items with replacement chunks, so healing will not be possible
any more after re-chunking (it is also unlikely it would ever work: due to the
change of chunking parameters, the missing chunk likely will never be seen again
even if you still have the data that produced it).
""")
subparser = subparsers.add_parser('recreate', parents=[common_parser], add_help=False,
description=self.do_recreate.__doc__,
epilog=recreate_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=self.do_recreate.__doc__)
subparser.set_defaults(func=self.do_recreate)
subparser.add_argument('--list', dest='output_list', action='store_true',
help='output verbose list of items (files, dirs, ...)')
subparser.add_argument('--filter', metavar='STATUSCHARS', dest='output_filter',
help='only display items with the given status characters (listed in borg create --help)')
subparser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='do not change anything')
subparser.add_argument('-s', '--stats', dest='stats', action='store_true',
help='print statistics at end')
define_exclusion_group(subparser, tag_files=True)
archive_group = subparser.add_argument_group('Archive options')
archive_group.add_argument('--target', dest='target', metavar='TARGET', default=None,
type=archivename_validator(),
help='create a new archive with the name ARCHIVE, do not replace existing archive '
'(only applies for a single archive)')
archive_group.add_argument('-c', '--checkpoint-interval', dest='checkpoint_interval',
type=int, default=1800, metavar='SECONDS',
help='write checkpoint every SECONDS seconds (Default: 1800)')
archive_group.add_argument('--comment', dest='comment', metavar='COMMENT', type=CommentSpec, default=None,
help='add a comment text to the archive')
archive_group.add_argument('--timestamp', metavar='TIMESTAMP', dest='timestamp',
type=timestamp, default=None,
help='manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). '
'alternatively, give a reference file/directory.')
archive_group.add_argument('-C', '--compression', metavar='COMPRESSION', dest='compression',
type=CompressionSpec, default=CompressionSpec('lz4'),
help='select compression algorithm, see the output of the '
'"borg help compression" command for details.')
archive_group.add_argument('--recompress', metavar='MODE', dest='recompress', nargs='?',
default='never', const='if-different', choices=('never', 'if-different', 'always'),
help='recompress data chunks according to ``--compression``. '
'MODE `if-different`: '
'recompress if current compression is with a different compression algorithm '
'(the level is not considered). '
'MODE `always`: '
'recompress even if current compression is with the same compression algorithm '
'(use this to change the compression level). '
'MODE `never` (default): '
'do not recompress.')
archive_group.add_argument('--chunker-params', metavar='PARAMS', dest='chunker_params',
type=ChunkerParams, default=CHUNKER_PARAMS,
help='specify the chunker parameters (ALGO, CHUNK_MIN_EXP, CHUNK_MAX_EXP, '
'HASH_MASK_BITS, HASH_WINDOW_SIZE) or `default` to use the current defaults. '
'default: %s,%d,%d,%d,%d' % CHUNKER_PARAMS)
subparser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE', nargs='?', default='',
type=location_validator(),
help='repository or archive to recreate')
subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
help='paths to recreate; patterns are supported')
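# Illustrative example (not from the original source): recompress an archive's data
# with a different algorithm; REPO::ARCHIVE is a placeholder location.
#
#   borg recreate --recompress -C zlib,6 REPO::ARCHIVE
#
# Given without a value, --recompress uses the `if-different` mode defined above.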
# borg rename
rename_epilog = process_epilog("""
This command renames an archive in the repository.
This results in a different archive ID.
""")
subparser = subparsers.add_parser('rename', parents=[common_parser], add_help=False,
description=self.do_rename.__doc__,
epilog=rename_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='rename archive')
subparser.set_defaults(func=self.do_rename)
subparser.add_argument('location', metavar='ARCHIVE',
type=location_validator(archive=True),
help='archive to rename')
subparser.add_argument('name', metavar='NEWNAME',
type=archivename_validator(),
help='the new archive name to use')
# borg serve
serve_epilog = process_epilog("""
This command starts a repository server process. This command is usually not used manually.
""")
subparser = subparsers.add_parser('serve', parents=[common_parser], add_help=False,
description=self.do_serve.__doc__, epilog=serve_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='start repository server process')
subparser.set_defaults(func=self.do_serve)
subparser.add_argument('--restrict-to-path', metavar='PATH', dest='restrict_to_paths', action='append',
help='restrict repository access to PATH. '
'Can be specified multiple times to allow the client access to several directories. '
'Access to all sub-directories is granted implicitly; PATH doesn\'t need to directly point to a repository.')
subparser.add_argument('--restrict-to-repository', metavar='PATH', dest='restrict_to_repositories', action='append',
help='restrict repository access. Only the repository located at PATH '
'(no sub-directories are considered) is accessible. '
'Can be specified multiple times to allow the client access to several repositories. '
'Unlike ``--restrict-to-path`` sub-directories are not accessible; '
'PATH needs to directly point at a repository location. '
'PATH may be an empty directory or the last element of PATH may not exist, in which case '
'the client may initialize a repository there.')
subparser.add_argument('--append-only', dest='append_only', action='store_true',
help='only allow appending to repository segment files. Note that this only '
'affects the low level structure of the repository, and running `delete` '
'or `prune` will still be allowed. See :ref:`append_only_mode` in Additional '
'Notes for more details.')
subparser.add_argument('--storage-quota', metavar='QUOTA', dest='storage_quota',
type=parse_storage_quota, default=None,
help='Override storage quota of the repository (e.g. 5G, 1.5T). '
'When a new repository is initialized, sets the storage quota on the new '
'repository as well. Default: no quota.')
# borg umount
umount_epilog = process_epilog("""
This command un-mounts a FUSE filesystem that was mounted with ``borg mount``.
This is a convenience wrapper that just calls the platform-specific shell
command - usually this is either umount or fusermount -u.
""")
subparser = subparsers.add_parser('umount', parents=[common_parser], add_help=False,
description=self.do_umount.__doc__,
epilog=umount_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='umount repository')
subparser.set_defaults(func=self.do_umount)
subparser.add_argument('mountpoint', metavar='MOUNTPOINT', type=str,
help='mountpoint of the filesystem to umount')
# borg upgrade
upgrade_epilog = process_epilog("""
Upgrade an existing, local Borg repository.
When you do not need borg upgrade
+++++++++++++++++++++++++++++++++
Not every change requires that you run ``borg upgrade``.
You do **not** need to run it when:
- moving your repository to a different place
- upgrading to another point release (like 1.0.x to 1.0.y),
except when noted otherwise in the changelog
- upgrading from 1.0.x to 1.1.x,
except when noted otherwise in the changelog
Borg 1.x.y upgrades
+++++++++++++++++++
Use ``borg upgrade --tam REPO`` to require manifest authentication
introduced with Borg 1.0.9 to address security issues. This means
that modifying the repository after doing this with a version prior
to 1.0.9 will raise a validation error, so only perform this upgrade
after updating all clients using the repository to 1.0.9 or newer.
This upgrade should be done on each client for safety reasons.
If a repository is accidentally modified with a pre-1.0.9 client after
this upgrade, use ``borg upgrade --tam --force REPO`` to remedy it.
If you routinely do this you might not want to enable this upgrade
(which will leave you exposed to the security issue). You can
reverse the upgrade by issuing ``borg upgrade --disable-tam REPO``.
See
https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability
for details.
Attic and Borg 0.xx to Borg 1.x
+++++++++++++++++++++++++++++++
This currently supports converting an Attic repository to Borg and also
helps with converting Borg 0.xx to 1.0.
Currently, only LOCAL repositories can be upgraded (issue #465).
Please note that ``borg create`` (since 1.0.0) uses bigger chunks by
default than old borg or attic did, so the new chunks won't deduplicate
with the old chunks in the upgraded repository.
See ``--chunker-params`` option of ``borg create`` and ``borg recreate``.
``borg upgrade`` will change the magic strings in the repository's
segments to match the new Borg magic strings. The keyfiles found in
$ATTIC_KEYS_DIR or ~/.attic/keys/ will also be converted and
copied to $BORG_KEYS_DIR or ~/.config/borg/keys.
The cache files are converted, from $ATTIC_CACHE_DIR or
~/.cache/attic to $BORG_CACHE_DIR or ~/.cache/borg, but the
cache layout between Borg and Attic changed, so it is possible
the first backup after the conversion takes longer than expected
due to the cache resync.
Upgrade should be able to resume if interrupted, although it
will still iterate over all segments. If you want to start
from scratch, use `borg delete` over the copied repository to
make sure the cache files are also removed::
borg delete borg
Unless ``--inplace`` is specified, the upgrade process first creates a backup
copy of the repository, in REPOSITORY.before-upgrade-DATETIME, using hardlinks.
This requires that the repository and its parent directory reside on same
filesystem so the hardlink copy can work.
This takes longer than in place upgrades, but is much safer and gives
progress information (as opposed to ``cp -al``). Once you are satisfied
with the conversion, you can safely destroy the backup copy.
WARNING: Running the upgrade in place will make the current
copy unusable with older version, with no way of going back
to previous versions. This can PERMANENTLY DAMAGE YOUR
REPOSITORY! Attic CAN NOT READ BORG REPOSITORIES, as the
magic strings have changed. You have been warned.""")
subparser = subparsers.add_parser('upgrade', parents=[common_parser], add_help=False,
description=self.do_upgrade.__doc__,
epilog=upgrade_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='upgrade repository format')
subparser.set_defaults(func=self.do_upgrade)
subparser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='do not change repository')
subparser.add_argument('--inplace', dest='inplace', action='store_true',
help='rewrite repository in place, with no chance of going back '
'to older versions of the repository.')
subparser.add_argument('--force', dest='force', action='store_true',
help='Force upgrade')
subparser.add_argument('--tam', dest='tam', action='store_true',
help='Enable manifest authentication (in key and cache) (Borg 1.0.9 and later).')
subparser.add_argument('--disable-tam', dest='disable_tam', action='store_true',
help='Disable manifest authentication (in key and cache).')
subparser.add_argument('location', metavar='REPOSITORY', nargs='?', default='',
type=location_validator(archive=False),
help='path to the repository to be upgraded')
# borg with-lock
with_lock_epilog = process_epilog("""
This command runs a user-specified command while the repository lock is held.
It will first try to acquire the lock (make sure that no other operation is
running in the repo), then execute the given command as a subprocess and wait
for its termination, release the lock and return the user command's return
code as borg's return code.
.. note::
If you copy a repository with the lock held, the lock will be present in
the copy. Thus, before using borg on the copy from a different host,
you need to use "borg break-lock" on the copied repository, because
Borg is cautious and does not automatically remove stale locks made by a different host.
""")
subparser = subparsers.add_parser('with-lock', parents=[common_parser], add_help=False,
description=self.do_with_lock.__doc__,
epilog=with_lock_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help='run user command with lock held')
subparser.set_defaults(func=self.do_with_lock)
subparser.add_argument('location', metavar='REPOSITORY',
type=location_validator(archive=False),
help='repository to lock')
subparser.add_argument('command', metavar='COMMAND',
help='command to run')
subparser.add_argument('args', metavar='ARGS', nargs=argparse.REMAINDER,
help='command arguments')
return parser
def get_args(self, argv, cmd):
"""usually, just returns argv, except if we deal with a ssh forced command for borg serve."""
result = self.parse_args(argv[1:])
if cmd is not None and result.func == self.do_serve:
# borg serve case:
# - "result" is how borg got invoked (e.g. via forced command from authorized_keys),
# - "client_result" (from "cmd") refers to the command the client wanted to execute,
# which might be different in the case of a forced command or same otherwise.
client_argv = shlex.split(cmd)
# Drop environment variables (do *not* interpret them) before trying to parse
# the borg command line.
client_argv = list(itertools.dropwhile(lambda arg: '=' in arg, client_argv))
client_result = self.parse_args(client_argv[1:])
if client_result.func == result.func:
# make sure we only process like normal if the client is executing
# the same command as specified in the forced command, otherwise
# just skip this block and return the forced command (== result).
# client is allowed to specify the whitelisted options,
# everything else comes from the forced "borg serve" command (or the defaults).
# stuff from blacklist must never be used from the client.
blacklist = {
'restrict_to_paths',
'restrict_to_repositories',
'append_only',
'storage_quota',
}
whitelist = {
'debug_topics',
'lock_wait',
'log_level',
'umask',
}
not_present = object()
for attr_name in whitelist:
assert attr_name not in blacklist, 'whitelist has blacklisted attribute name %s' % attr_name
value = getattr(client_result, attr_name, not_present)
if value is not not_present:
# note: it is not possible to specify a whitelisted option via a forced command,
# it always gets overridden by the value specified (or defaulted to) by the client command.
setattr(result, attr_name, value)
return result
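# Illustrative example (not from the original source): a forced command entry in
# ~/.ssh/authorized_keys pinning a client key to "borg serve"; the key type/blob and
# repository path are placeholders. The client's own invocation arrives via
# SSH_ORIGINAL_COMMAND and is reconciled with the forced command by get_args():
#
#   command="borg serve --restrict-to-repository /srv/backups/repo",restrict ssh-ed25519 AAAA... client@host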
def parse_args(self, args=None):
# We can't use argparse for "serve" since we don't want it to show up in "Available commands"
if args:
args = self.preprocess_args(args)
parser = self.build_parser()
args = parser.parse_args(args or ['-h'])
parser.common_options.resolve(args)
func = get_func(args)
if func == self.do_create and not args.paths:
# need at least 1 path but args.paths may also be populated from patterns
parser.error('Need at least one PATH argument.')
return args
def prerun_checks(self, logger, is_serve):
if not is_serve:
# this is the borg *client*, we need to check the python:
check_python()
check_extension_modules()
selftest(logger)
def _setup_implied_logging(self, args):
""" turn on INFO level logging for args that imply that they will produce output """
# map of option name to name of logger for that option
option_logger = {
'output_list': 'borg.output.list',
'show_version': 'borg.output.show-version',
'show_rc': 'borg.output.show-rc',
'stats': 'borg.output.stats',
'progress': 'borg.output.progress',
}
for option, logger_name in option_logger.items():
option_set = args.get(option, False)
logging.getLogger(logger_name).setLevel('INFO' if option_set else 'WARN')
def _setup_topic_debugging(self, args):
"""Turn on DEBUG level logging for specified --debug-topics."""
for topic in args.debug_topics:
if '.' not in topic:
topic = 'borg.debug.' + topic
logger.debug('Enabling debug topic %s', topic)
logging.getLogger(topic).setLevel('DEBUG')
def run(self, args):
os.umask(args.umask) # early, before opening files
self.lock_wait = args.lock_wait
func = get_func(args)
# do not use loggers before this!
is_serve = func == self.do_serve
setup_logging(level=args.log_level, is_serve=is_serve, json=args.log_json)
self.log_json = args.log_json
args.progress |= is_serve
self._setup_implied_logging(vars(args))
self._setup_topic_debugging(args)
if getattr(args, 'stats', False) and getattr(args, 'dry_run', False):
logger.error("--stats does not work with --dry-run.")
return self.exit_code
if args.show_version:
logging.getLogger('borg.output.show-version').info('borgbackup version %s' % __version__)
self.prerun_checks(logger, is_serve)
if not is_supported_msgpack():
logger.error("You do not have a supported version of the msgpack python package installed. Terminating.")
logger.error("This should never happen as specific, supported versions are required by our setup.py.")
logger.error("Do not contact borgbackup support about this.")
return set_ec(EXIT_ERROR)
if is_slow_msgpack():
logger.warning(PURE_PYTHON_MSGPACK_WARNING)
if args.debug_profile:
# Import only when needed - avoids a further increase in startup time
import cProfile
import marshal
logger.debug('Writing execution profile to %s', args.debug_profile)
# Open the file early, before running the main program, to avoid
# a very late crash in case the specified path is invalid.
with open(args.debug_profile, 'wb') as fd:
profiler = cProfile.Profile()
variables = dict(locals())
profiler.enable()
try:
return set_ec(func(args))
finally:
profiler.disable()
profiler.snapshot_stats()
if args.debug_profile.endswith('.pyprof'):
marshal.dump(profiler.stats, fd)
else:
# We use msgpack here instead of the marshal module used by cProfile itself,
# because the latter is insecure. Since these files may be shared over the
# internet we don't want a format that is impossible to interpret outside
# an insecure implementation.
# See scripts/msgpack2marshal.py for a small script that turns a msgpack file
# into a marshal file that can be read by e.g. pyprof2calltree.
# For local use it's unnecessary hassle, though, that's why .pyprof makes
# it compatible (see above).
# We do not use our msgpack wrapper here, but directly call mp_pack.
msgpack.mp_pack(profiler.stats, fd, use_bin_type=True)
else:
return set_ec(func(args))
def sig_info_handler(sig_no, stack): # pragma: no cover
"""search the stack for infos about the currently processed file and print them"""
with signal_handler(sig_no, signal.SIG_IGN):
for frame in inspect.getouterframes(stack):
func, loc = frame[3], frame[0].f_locals
if func in ('process_file', '_process', ): # create op
path = loc['path']
try:
pos = loc['fd'].tell()
total = loc['st'].st_size
except Exception:
pos, total = 0, 0
logger.info("{0} {1}/{2}".format(path, format_file_size(pos), format_file_size(total)))
break
if func in ('extract_item', ): # extract op
path = loc['item'].path
try:
pos = loc['fd'].tell()
except Exception:
pos = 0
logger.info("{0} {1}/???".format(path, format_file_size(pos)))
break
def sig_trace_handler(sig_no, stack): # pragma: no cover
print('\nReceived SIGUSR2 at %s, dumping trace...' % datetime.now().replace(microsecond=0), file=sys.stderr)
faulthandler.dump_traceback()
def main(): # pragma: no cover
# Make sure stdout and stderr have errors='replace' to avoid unicode
# issues when print()-ing unicode file names
sys.stdout = ErrorIgnoringTextIOWrapper(sys.stdout.buffer, sys.stdout.encoding, 'replace', line_buffering=True)
sys.stderr = ErrorIgnoringTextIOWrapper(sys.stderr.buffer, sys.stderr.encoding, 'replace', line_buffering=True)
# If we receive SIGINT (ctrl-c), SIGTERM (kill) or SIGHUP (kill -HUP),
# catch them and raise a proper exception that can be handled for an
# orderly exit.
# SIGHUP is important especially for systemd systems, where logind
# sends it when a session exits, in addition to any traditional use.
# Output some info if we receive SIGUSR1 or SIGINFO (ctrl-t).
# Register fault handler for SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL.
faulthandler.enable()
with signal_handler('SIGINT', raising_signal_handler(KeyboardInterrupt)), \
signal_handler('SIGHUP', raising_signal_handler(SigHup)), \
signal_handler('SIGTERM', raising_signal_handler(SigTerm)), \
signal_handler('SIGUSR1', sig_info_handler), \
signal_handler('SIGUSR2', sig_trace_handler), \
signal_handler('SIGINFO', sig_info_handler):
archiver = Archiver()
msg = msgid = tb = None
tb_log_level = logging.ERROR
try:
args = archiver.get_args(sys.argv, os.environ.get('SSH_ORIGINAL_COMMAND'))
except Error as e:
msg = e.get_message()
tb_log_level = logging.ERROR if e.traceback else logging.DEBUG
tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
# we might not have logging setup yet, so get out quickly
print(msg, file=sys.stderr)
if tb_log_level == logging.ERROR:
print(tb, file=sys.stderr)
sys.exit(e.exit_code)
try:
with sig_int:
exit_code = archiver.run(args)
except Error as e:
msg = e.get_message()
msgid = type(e).__qualname__
tb_log_level = logging.ERROR if e.traceback else logging.DEBUG
tb = "%s\n%s" % (traceback.format_exc(), sysinfo())
exit_code = e.exit_code
except RemoteRepository.RPCError as e:
important = e.exception_class not in ('LockTimeout', ) and e.traceback
msgid = e.exception_class
tb_log_level = logging.ERROR if important else logging.DEBUG
if important:
msg = e.exception_full
else:
msg = e.get_message()
tb = '\n'.join('Borg server: ' + l for l in e.sysinfo.splitlines())
tb += "\n" + sysinfo()
exit_code = EXIT_ERROR
except Exception:
msg = 'Local Exception'
msgid = 'Exception'
tb_log_level = logging.ERROR
tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
exit_code = EXIT_ERROR
except KeyboardInterrupt:
msg = 'Keyboard interrupt'
tb_log_level = logging.DEBUG
tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
exit_code = EXIT_ERROR
except SigTerm:
msg = 'Received SIGTERM'
msgid = 'Signal.SIGTERM'
tb_log_level = logging.DEBUG
tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
exit_code = EXIT_ERROR
except SigHup:
msg = 'Received SIGHUP.'
msgid = 'Signal.SIGHUP'
exit_code = EXIT_ERROR
if msg:
logger.error(msg, msgid=msgid)
if tb:
logger.log(tb_log_level, tb)
if args.show_rc:
rc_logger = logging.getLogger('borg.output.show-rc')
exit_msg = 'terminating with %s status, rc %d'
if exit_code == EXIT_SUCCESS:
rc_logger.info(exit_msg % ('success', exit_code))
elif exit_code == EXIT_WARNING:
rc_logger.warning(exit_msg % ('warning', exit_code))
elif exit_code == EXIT_ERROR:
rc_logger.error(exit_msg % ('error', exit_code))
else:
rc_logger.error(exit_msg % ('abnormal', exit_code or 666))
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
the-stack_106_23969 | from __future__ import absolute_import
import json
import numpy
import logging
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import NetstringReceiver
from autobahn.twisted.websocket import WebSocketServerProtocol
from autobahn.twisted.websocket import WebSocketServerFactory
from relaax.common.rlx_message import RLXMessage
from .wsproxy_config import options
log = logging.getLogger(__name__)
class ProxyClient(NetstringReceiver):
def __init__(self, client_id, cli_queue, srv_queue):
self.client_id = client_id
self.cli_queue = cli_queue
self.srv_queue = srv_queue
def connectionMade(self):
log.debug("Proxy Client connected to peer for client: {0}".format(self.client_id))
self.cli_queue.get().addCallback(self.serverDataReceived)
def stringReceived(self, data):
msg = RLXMessage.from_wire(data)
msg['sid'] = self.client_id
if 'data' in msg and isinstance(msg['data'], numpy.ndarray):
msg['data'] = msg['data'].tolist()
self.srv_queue.put(json.dumps(msg))
def serverDataReceived(self, data):
if data['command'] == 'disconnect':
self.disconnect()
else:
self.sendString(RLXMessage.to_wire(data))
self.cli_queue.get().addCallback(self.serverDataReceived)
def connectionLost(self, reason):
log.debug("Proxy Client connection lost for client: {0}".format(self.client_id))
self.cli_queue = None
def disconnect(self):
log.debug("Proxy Client disconnecting client: {0}".format(self.client_id))
self.transport.loseConnection()
class ProxyClientFactory(ClientFactory):
def __init__(self, ws, client_id, srv_queue):
self.ws = ws
self.client_id = client_id
self.cli_queue = defer.DeferredQueue()
self.srv_queue = srv_queue
def buildProtocol(self, addr):
client = ProxyClient(
self.client_id,
self.cli_queue,
self.srv_queue)
self.ws.addClient(self.client_id, client)
return client
def clientConnectionLost(self, connector, reason):
self.ws.removeClient(self.client_id)
def clientConnectionFailed(self, connector, reason):
self.ws.removeClient(self.client_id)
class WsServerProtocol(WebSocketServerProtocol):
def __init__(self):
super(WsServerProtocol, self).__init__()  # initialise the direct base class, not its parent
self.clients = {}
self.srv_queue = defer.DeferredQueue()
def addClient(self, client_id, client):
self.clients[client_id] = client
log.debug("Added proxy client. Client sid: {0}".format(client_id))
def removeClient(self, client_id):
if client_id in self.clients:
del self.clients[client_id]
log.debug("Removed proxy client for: {0}, {1}".format(client_id, self.clients))
def onConnect(self, request):
log.debug("Client connecting: {0}".format(request.peer))
self.srv_queue.get().addCallback(self.clientDataReceived)
def onClose(self, wasClean, code, reason):
if wasClean:
log.debug("WebSocket connection closed...")
else:
log.debug("WebSocket connection was broken. Closing code [{0}], reason: {1}".format(code, reason))
for client in self.clients.values():
client.disconnect()
def connectRLX(self, client_id, srv_queue):
factory = ProxyClientFactory(self, client_id, srv_queue)
reactor.connectTCP(options.rlx_server[0], options.rlx_server[1], factory)
return factory.cli_queue
def onMessage(self, payload, isBinary):
msg = json.loads(payload.decode('utf8'))
client = self.clients.get(msg['sid'])
if client is None:
cli_queue = self.connectRLX(msg['sid'], self.srv_queue)
else:
cli_queue = client.cli_queue
cli_queue.put(msg)
def clientDataReceived(self, data):
self.sendMessage(data.encode())
self.srv_queue.get().addCallback(self.clientDataReceived)
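# Expected client message shape (inferred from onMessage()/stringReceived() above,
# not a formal spec): a JSON object such as
#   {"sid": "<session id>", "command": "<verb>", "data": [...]}
# "sid" routes the message to its per-session ProxyClient, "command" == "disconnect"
# closes the RLX connection, and anything else is forwarded via RLXMessage.to_wire().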
def main():
address = "ws://%s:%s" % options.bind
log.info("Starting Web Socket server on %s" % address)
factory = WebSocketServerFactory(address)
factory.protocol = WsServerProtocol
# factory.setProtocolOptions(maxConnections=2)
reactor.listenTCP(int(options.bind[1]), factory)
log.info("Expecting RLX server on %s:%s" % options.rlx_server)
reactor.run()
if __name__ == '__main__':
main()
|
the-stack_106_23973 | from machine import *
from machine import Pin, I2C
import machine
import ssd1306
import time
import utime
switchA = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
switchB = machine.Pin(13, machine.Pin.IN, value = 0)
switchC = machine.Pin(2, machine.Pin.IN, machine.Pin.PULL_UP)
point = 0
settime = (2018, 12, 1, 1, 1, 1, 1, 1)
def switchAcallback(p):
global point
time.sleep(0.1)
if p.value() == 1:
point = point + 1
if point > 7:
point = 0
def switchBcallback(p):
time.sleep(0.1)
global displaytime
global point
print('in')
temp = list(displaytime)
temp[point] += 1
rtc.datetime(temp)
print('set')
def switchCcallback(p):
time.sleep(0.1)
global displaytime
global point
temp = list(displaytime)
temp[point] -= 1
rtc.datetime(temp)
switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAcallback)
switchB.irq(trigger=machine.Pin.IRQ_RISING, handler=switchBcallback)
switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCcallback)
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
oled = ssd1306.SSD1306_I2C(128, 32, i2c)
rtc = RTC()
rtc.datetime(settime)
while 1:
oled.fill(0)
displaytime = rtc.datetime()
oled.text(str(displaytime[0]) + '/' + str(displaytime[1]) + '/' + str(displaytime[2]) + 'Week:' + str(displaytime[3]), 0, 0)
oled.text(str(displaytime[4]) + ':' + str(displaytime[5]) + ':' + str(displaytime[6]), 0, 10)
oled.show()
|
the-stack_106_23974 | import numpy as np
import os
import shutil
import torch
from torch.optim import Optimizer
BEST_MODEL_PATH = 'model_best.pt'
class AveTracker:
def __init__(self):
self.average = 0
self.sum = 0
self.counter = 0
def update(self, value, n):
self.sum += value * n
self.counter += n
self.average = self.sum / self.counter
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape: the slice can be non-contiguous
res.append(correct_k.mul_(100.0 / batch_size))
return res
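def _example_accuracy_usage():
    # Illustrative sketch, not part of the original module: computes top-1/top-5
    # accuracy for a random batch of 8 samples over 10 classes.
    logits = torch.randn(8, 10)
    targets = torch.randint(0, 10, (8,))
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    return top1.item(), top5.item()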
def save_checkpoint(state_dict, is_best, savedir, epoch):
if not os.path.exists(savedir):
os.makedirs(savedir)
filename = os.path.join(savedir, 'checkpoint_ep{}.pt'.format(epoch))
torch.save(state_dict, filename)
if is_best:
best_filename = os.path.join(savedir, BEST_MODEL_PATH)
shutil.copyfile(filename, best_filename)
def load_best_model_state_dict(savedir):
"""Loads best model's state dict"""
return torch.load(os.path.join(savedir, BEST_MODEL_PATH))
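def _example_checkpoint_roundtrip(model, savedir='checkpoints', epoch=0):
    # Illustrative sketch, not part of the original module: 'checkpoints' is a
    # hypothetical directory. Saves the current weights as the best checkpoint,
    # then reloads them into the model.
    save_checkpoint(model.state_dict(), is_best=True, savedir=savedir, epoch=epoch)
    model.load_state_dict(load_best_model_state_dict(savedir))
    return model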
def count_parameters_in_millions(model):
return sum(np.prod(w.size()) for w in model.parameters()) / 1e6  # builtin sum: np.sum over a generator is deprecated
class Cutout:
"""Applies cutout to input image"""
def __init__(self, length):
assert length >= 0
self.length = length
def __call__(self, img):
if self.length > 0:
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
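def _example_cutout_usage():
    # Illustrative sketch, not part of the original module: applies Cutout to a
    # random CIFAR-sized CHW tensor, zeroing a 16x16 square at a random position.
    img = torch.rand(3, 32, 32)
    return Cutout(length=16)(img)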
class OneCycleLR:
""" Sets the learning rate of each parameter group by the one cycle learning rate policy
proposed in https://arxiv.org/pdf/1708.07120.pdf.
It is recommended that you set the max_lr to be the learning rate that achieves
the lowest loss in the learning rate range test, and set min_lr to be 1/10 th of max_lr.
So, the learning rate changes like min_lr -> max_lr -> min_lr -> final_lr,
where final_lr = min_lr * reduce_factor.
Note: Currently only supports one parameter group.
Args:
optimizer: (Optimizer) against which we apply this scheduler
num_steps: (int) of total number of steps/iterations
lr_range: (tuple) of min and max values of learning rate
momentum_range: (tuple) of min and max values of momentum
annihilation_frac: (float), fraction of steps used to annihilate the learning rate
reduce_factor: (float), denotes the factor by which we annihilate the learning rate at the end
last_step: (int), denotes the last step. Set to -1 to start training from the beginning
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = OneCycleLR(optimizer, num_steps=num_steps, lr_range=(0.1, 1.))
>>> for epoch in range(epochs):
>>> for step in train_dataloader:
>>> train(...)
>>> scheduler.step()
Useful resources:
https://towardsdatascience.com/finding-good-learning-rate-and-the-one-cycle-policy-7159fe1db5d6
https://medium.com/vitalify-asia/whats-up-with-deep-learning-optimizers-since-adam-5c1d862b9db0
"""
def __init__(self,
optimizer: Optimizer,
num_steps: int,
lr_range: tuple = (0.1, 1.),
momentum_range: tuple = (0.85, 0.95),
annihilation_frac: float = 0.1,
reduce_factor: float = 0.01,
last_step: int = -1):
# Sanity check
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
self.num_steps = num_steps
self.min_lr, self.max_lr = lr_range[0], lr_range[1]
assert self.min_lr < self.max_lr, \
"Argument lr_range must be (min_lr, max_lr), where min_lr < max_lr"
self.min_momentum, self.max_momentum = momentum_range[0], momentum_range[1]
assert self.min_momentum < self.max_momentum, \
"Argument momentum_range must be (min_momentum, max_momentum), where min_momentum < max_momentum"
# Total number of steps in the cycle
self.num_cycle_steps = int(num_steps * (1. - annihilation_frac))
self.final_lr = self.min_lr * reduce_factor
self.last_step = last_step
if self.last_step == -1:
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer. (Borrowed from _LRScheduler class in torch.optim.lr_scheduler.py)
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state. (Borrowed from _LRScheduler class in torch.optim.lr_scheduler.py)
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
return self.optimizer.param_groups[0]['lr']
def get_momentum(self):
return self.optimizer.param_groups[0]['momentum']
def step(self):
"""Conducts one step of learning rate and momentum update
"""
current_step = self.last_step + 1
self.last_step = current_step
if current_step <= self.num_cycle_steps // 2:
# Scale up phase
scale = current_step / (self.num_cycle_steps // 2)
lr = self.min_lr + (self.max_lr - self.min_lr) * scale
momentum = self.max_momentum - \
(self.max_momentum - self.min_momentum) * scale
elif current_step <= self.num_cycle_steps:
# Scale down phase
scale = (current_step - self.num_cycle_steps // 2) / \
(self.num_cycle_steps - self.num_cycle_steps // 2)
lr = self.max_lr - (self.max_lr - self.min_lr) * scale
momentum = self.min_momentum + \
(self.max_momentum - self.min_momentum) * scale
elif current_step <= self.num_steps:
# Annihilation phase: only change lr
scale = (current_step - self.num_cycle_steps) / \
(self.num_steps - self.num_cycle_steps)
lr = self.min_lr - (self.min_lr - self.final_lr) * scale
momentum = None
else:
# Exceeded given num_steps: do nothing
return
self.optimizer.param_groups[0]['lr'] = lr
if momentum:
self.optimizer.param_groups[0]['momentum'] = momentum |
the-stack_106_23975 | import glob
import random
import json
import os
import six
import cv2
import numpy as np
from tqdm import tqdm
from time import time
from .train import find_latest_checkpoint
from .data_utils.data_loader import get_image_array, get_segmentation_array,\
DATA_LOADER_SEED, class_colors, get_pairs_from_paths
from .models.config import IMAGE_ORDERING
random.seed(DATA_LOADER_SEED)
def model_from_checkpoint_path(checkpoints_path):
from .models.all_models import model_from_name
assert (os.path.isfile(checkpoints_path+"_config.json")
), "Checkpoint not found."
model_config = json.loads(
open(checkpoints_path+"_config.json", "r").read())
latest_weights = find_latest_checkpoint(checkpoints_path)
assert (latest_weights is not None), "Checkpoint not found."
model = model_from_name[model_config['model_class']](
model_config['n_classes'], input_height=model_config['input_height'],
input_width=model_config['input_width'])
print("loaded weights ", latest_weights)
status = model.load_weights(latest_weights)
if status is not None:
status.expect_partial()
return model
def get_colored_segmentation_image(seg_arr, n_classes, colors=class_colors):
output_height = seg_arr.shape[0]
output_width = seg_arr.shape[1]
seg_img = np.zeros((output_height, output_width, 3))
for c in range(n_classes):
seg_arr_c = seg_arr[:, :] == c
seg_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
seg_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
seg_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
return seg_img
def get_legends(class_names, colors=class_colors):
n_classes = len(class_names)
legend = np.zeros(((len(class_names) * 25) + 25, 125, 3),
dtype="uint8") + 255
class_names_colors = enumerate(zip(class_names[:n_classes],
colors[:n_classes]))
for (i, (class_name, color)) in class_names_colors:
color = [int(c) for c in color]
cv2.putText(legend, class_name, (5, (i * 25) + 17),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)
cv2.rectangle(legend, (100, (i * 25)), (125, (i * 25) + 25),
tuple(color), -1)
return legend
def overlay_seg_image(inp_img, seg_img):
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h), interpolation=cv2.INTER_NEAREST)
fused_img = (inp_img/2 + seg_img/2).astype('uint8')
return fused_img
def concat_lenends(seg_img, legend_img):
new_h = np.maximum(seg_img.shape[0], legend_img.shape[0])
new_w = seg_img.shape[1] + legend_img.shape[1]
out_img = np.zeros((new_h, new_w, 3)).astype('uint8') + legend_img[0, 0, 0]
out_img[:legend_img.shape[0], : legend_img.shape[1]] = np.copy(legend_img)
out_img[:seg_img.shape[0], legend_img.shape[1]:] = np.copy(seg_img)
return out_img
def visualize_segmentation(seg_arr, inp_img=None, n_classes=None,
colors=class_colors, class_names=None,
overlay_img=False, show_legends=False,
prediction_width=None, prediction_height=None):
if n_classes is None:
n_classes = np.max(seg_arr)
seg_img = get_colored_segmentation_image(seg_arr, n_classes, colors=colors)
if inp_img is not None:
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h), interpolation=cv2.INTER_NEAREST)
if (prediction_height is not None) and (prediction_width is not None):
seg_img = cv2.resize(seg_img, (prediction_width, prediction_height), interpolation=cv2.INTER_NEAREST)
if inp_img is not None:
inp_img = cv2.resize(inp_img,
(prediction_width, prediction_height))
if overlay_img:
assert inp_img is not None
seg_img = overlay_seg_image(inp_img, seg_img)
if show_legends:
assert class_names is not None
legend_img = get_legends(class_names, colors=colors)
seg_img = concat_lenends(seg_img, legend_img)
return seg_img
def predict(model=None, inp=None, out_fname=None,
checkpoints_path=None, overlay_img=False,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None,
read_image_type=1):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
assert (inp is not None)
assert ((type(inp) is np.ndarray) or isinstance(inp, six.string_types)),\
"Input should be the CV image or the input file name"
if isinstance(inp, six.string_types):
inp = cv2.imread(inp, read_image_type)
assert (len(inp.shape) == 3 or len(inp.shape) == 1 or len(inp.shape) == 4), "Image should be h,w,3 "
output_width = model.output_width
output_height = model.output_height
input_width = model.input_width
input_height = model.input_height
n_classes = model.n_classes
x = get_image_array(inp, input_width, input_height,
ordering=IMAGE_ORDERING)
pr = model.predict(np.array([x]))[0]
pr = pr.reshape((output_height, output_width, n_classes)).argmax(axis=2)
seg_img = visualize_segmentation(pr, inp, n_classes=n_classes,
colors=colors, overlay_img=overlay_img,
show_legends=show_legends,
class_names=class_names,
prediction_width=prediction_width,
prediction_height=prediction_height)
if out_fname is not None:
cv2.imwrite(out_fname, seg_img)
# Return the predicted class map rather than the rendered image: evaluate() and
# predict_video() below index it by class id; the colored seg_img is only written to out_fname.
return pr
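def _example_predict_usage():
    # Illustrative sketch, not part of the original module: the checkpoint prefix
    # and image paths are hypothetical placeholders.
    return predict(checkpoints_path="checkpoints/vgg_unet_1",
                   inp="sample_images/input.jpg",
                   out_fname="sample_images/output.png",
                   overlay_img=True)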
def predict_multiple(model=None, inps=None, inp_dir=None, out_dir=None,
checkpoints_path=None, overlay_img=False,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None, read_image_type=1):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
if inps is None and (inp_dir is not None):
inps = glob.glob(os.path.join(inp_dir, "*.jpg")) + glob.glob(
os.path.join(inp_dir, "*.png")) + \
glob.glob(os.path.join(inp_dir, "*.jpeg"))
inps = sorted(inps)
assert type(inps) is list
all_prs = []
if out_dir is not None:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for i, inp in enumerate(tqdm(inps)):
if out_dir is None:
out_fname = None
else:
if isinstance(inp, six.string_types):
out_fname = os.path.join(out_dir, os.path.basename(inp))
else:
out_fname = os.path.join(out_dir, str(i) + ".jpg")
pr = predict(model, inp, out_fname,
overlay_img=overlay_img, class_names=class_names,
show_legends=show_legends, colors=colors,
prediction_width=prediction_width,
prediction_height=prediction_height, read_image_type=read_image_type)
all_prs.append(pr)
return all_prs
def set_video(inp, video_name):
cap = cv2.VideoCapture(inp)
fps = int(cap.get(cv2.CAP_PROP_FPS))
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (video_width, video_height)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
video = cv2.VideoWriter(video_name, fourcc, fps, size)
return cap, video, fps
def predict_video(model=None, inp=None, output=None,
checkpoints_path=None, display=False, overlay_img=True,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
n_classes = model.n_classes
cap, video, fps = set_video(inp, output)
while(cap.isOpened()):
prev_time = time()
ret, frame = cap.read()
if frame is not None:
pr = predict(model=model, inp=frame)
fused_img = visualize_segmentation(
pr, frame, n_classes=n_classes,
colors=colors,
overlay_img=overlay_img,
show_legends=show_legends,
class_names=class_names,
prediction_width=prediction_width,
prediction_height=prediction_height
)
else:
break
print("FPS: {}".format(1/(time() - prev_time)))
if output is not None:
video.write(fused_img)
if display:
cv2.imshow('Frame masked', fused_img)
if cv2.waitKey(fps) & 0xFF == ord('q'):
break
cap.release()
if output is not None:
video.release()
cv2.destroyAllWindows()
def evaluate(model=None, inp_images=None, annotations=None,
inp_images_dir=None, annotations_dir=None, checkpoints_path=None, read_image_type=1):
if model is None:
assert (checkpoints_path is not None),\
"Please provide the model or the checkpoints_path"
model = model_from_checkpoint_path(checkpoints_path)
if inp_images is None:
assert (inp_images_dir is not None),\
"Please provide inp_images or inp_images_dir"
assert (annotations_dir is not None),\
"Please provide inp_images or inp_images_dir"
paths = get_pairs_from_paths(inp_images_dir, annotations_dir)
paths = list(zip(*paths))
inp_images = list(paths[0])
annotations = list(paths[1])
assert type(inp_images) is list
assert type(annotations) is list
tp = np.zeros(model.n_classes)
fp = np.zeros(model.n_classes)
fn = np.zeros(model.n_classes)
n_pixels = np.zeros(model.n_classes)
for inp, ann in tqdm(zip(inp_images, annotations)):
pr = predict(model, inp, read_image_type=read_image_type)
gt = get_segmentation_array(ann, model.n_classes,
model.output_width, model.output_height,
no_reshape=True, read_image_type=read_image_type)
gt = gt.argmax(-1)
pr = pr.flatten()
gt = gt.flatten()
for cl_i in range(model.n_classes):
tp[cl_i] += np.sum((pr == cl_i) * (gt == cl_i))
fp[cl_i] += np.sum((pr == cl_i) * ((gt != cl_i)))
fn[cl_i] += np.sum((pr != cl_i) * ((gt == cl_i)))
n_pixels[cl_i] += np.sum(gt == cl_i)
cl_wise_score = tp / (tp + fp + fn + 0.000000000001)
n_pixels_norm = n_pixels / np.sum(n_pixels)
frequency_weighted_IU = np.sum(cl_wise_score*n_pixels_norm)
mean_IU = np.mean(cl_wise_score)
return {
"frequency_weighted_IU": frequency_weighted_IU,
"mean_IU": mean_IU,
"class_wise_IU": cl_wise_score
}
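def _example_evaluate_usage():
    # Illustrative sketch, not part of the original module: the checkpoint prefix
    # and dataset directories are hypothetical placeholders.
    scores = evaluate(checkpoints_path="checkpoints/vgg_unet_1",
                      inp_images_dir="dataset/images_test/",
                      annotations_dir="dataset/annotations_test/")
    return scores["mean_IU"]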
|
the-stack_106_23977 | """
A tree of operations. Output flows up the tree; inputs feed in from below.
"""
import inspect
import logging
import operator
import re
from base import base
logging.basicConfig(filename='../log/{}.log'.format(__name__), level=logging.DEBUG)
logger = logging.getLogger(__name__)
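# Illustrative wiring definitions of the shape _turnDefinitionIntoTreeNode below parses
# (example values only; the real input is read from a file via base.getInputLines):
#
#   123 -> x          leaf node: wire x carries the literal 123
#   x AND y -> d      commutative two-input gate
#   x LSHIFT 2 -> f   variable OP literal (handled as non-commutative)
#   NOT x -> h        unary operator
#   y -> i            passthrough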
class Optree:
"""
Apply operations across a binary tree.
"""
opLookUp = {}
resolver = None
evaluatedOutputs = {}
unevaluatedWaiting = {}
    def __init__(self, operation=None, output=None, branches=None, leaf=None):
        branches = branches if branches is not None else []  # avoid a shared mutable default
        self.unresolvedInputs = branches
        self.branches = branches
self.leaf = leaf
self.operation = operation
self.output = output
if not self.unresolvedInputs: # Resolvable
self.evaluate()
def __repr__(self):
params = ', '.join(sorted(["{}={}".format(key, value) for key, value in vars(self).items() if value]))
return "{}({})".format(self.__class__.__name__, params)
def __str__(self):
params = ', '.join(sorted(["{}={}".format(key, value) for key, value in vars(self).items() if value]))
return "{}({})".format(self.__class__.__name__, params)
def evaluate(self):
# print("Evaluating: ", self)
self.unresolvedInputs = [input for input in self.branches if input not in self.evaluatedOutputs]
if self.unresolvedInputs or not self.operation: # Unresolved
for ip in self.unresolvedInputs:
if ip not in self.unevaluatedWaiting:
self.unevaluatedWaiting[ip] = [self]
elif self not in self.unevaluatedWaiting[ip]: # and ip in self.unevaluatedWaiting
                    self.unevaluatedWaiting[ip].append(self)
return
elif not self.branches: # Unary operator
self.evaluatedOutputs[self.output] = self.operation(self.leaf)
elif self.leaf: # Non-commutative
# print(self.operation)
self.evaluatedOutputs[self.output] = self.operation(self.evaluatedOutputs[self.branches[0]], self.leaf)
elif self.operation: # Commutative
args = [self.evaluatedOutputs[branch] for branch in self.branches]
self.evaluatedOutputs[self.output] = self.operation(*args)
else:
            logger.critical("{}.{}: Unmatched evaluation \"{}\"".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, self))
return None
logger.debug("{}.{}: Evaluated \"{}\"".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, self.evaluatedOutputs[self.output]))
if self.output in self.unevaluatedWaiting:
waitingNodes = self.unevaluatedWaiting.pop(self.output)
for node in waitingNodes:
node.evaluate()
return self.evaluatedOutputs[self.output]
class DummyOpTree(Optree):
opLookUp = {"EMPTYish": True}
def _invert(x):
return x ^ 65535
class BooleanOpTree(Optree):
"""
operator.and_, operator.or_, operator.lshift, operator.rshift, operator.not_
"""
opLookUp = {"NOT": _invert, "LSHIFT": operator.lshift, "RSHIFT": operator.rshift,
"AND": operator.and_, "OR": operator.or_, "ID": int}
evaluatedOutputs = {}
class BinaryTreePlanter():
def __init__(self, fileName=None):
if fileName:
self.evaluatedOutputs = {}
self._populateTree(fileName)
def _invert(self, x):
return x ^ 65535
def _turnDefinitionIntoTreeNode(self, definition):
output = None
inputDef = None
if not definition: # Empty line
return None
else:
inputDef, output = definition.split(' -> ')
logger.debug("{}.{}: definition \"{}\": IPs \"{}\": OP: \'{}\'".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, definition, inputDef, output))
# Task 2 hack
if output == 'b':
inputDef = "46065"
        match = re.match(r"(\d+)$", inputDef)
if match: # Leaf Node
node = BooleanOpTree(operation=BooleanOpTree.opLookUp["ID"], output=output, leaf=int(match.group(0)))
logger.debug("{}.{}: Matched & returned leaf node:\n{}.".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, node))
return node
match = re.match("([a-z]+)$", inputDef)
if match: # Passthrough
node = BooleanOpTree(operation=BooleanOpTree.opLookUp["ID"], output=output, branches=[match.group(0)])
logger.debug("{}.{}: Matched & returned passthrough node:\n{}.".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, node))
return node
        match = re.match(r"([a-z]+) ([A-Z]+) (\d+)", definition)
        if match:  # Non-commutative 2 Parameter Boolean (variable OP literal)
node = BooleanOpTree(operation=BooleanOpTree.opLookUp[match.group(2)], output=output,
branches=[match.group(1)], leaf=int(match.group(3)))
logger.debug("{}.{}: Matched & returned non-commutative node:\n{}.".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, node))
return node
match = re.match("([a-z]+) ([A-Z]+) ([a-z]+)", definition)
if match: # Commutative 2 Parameter Boolean
node = BooleanOpTree(operation=BooleanOpTree.opLookUp[match.group(2)], output=output,
branches=[match.group(1), match.group(3)])
logger.debug("{}.{}: Matched & returned commutative node:\n{}.".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, node))
return node
        match = re.match(r"(\d+) ([A-Z]+) ([a-z]+)", definition)
if match: # Commutative 2 Parameter Boolean treated as non-commutative
node = BooleanOpTree(operation=BooleanOpTree.opLookUp[match.group(2)], output=output,
branches=[match.group(3)], leaf=int(match.group(1)))
logger.debug("{}.{}: Matched & returned commutative node:\n{}.".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, node))
return node
match = re.match("([A-Z]+) ([a-z]+)", definition)
if match:
node = BooleanOpTree(operation=BooleanOpTree.opLookUp[match.group(1)], output=output,
branches=[match.group(2)])
logger.debug("{}.{}: Matched & returned unary operator node:\n{}.".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, node))
return node
print("UNMATCHED \"{}\".".format(inputDef))
logger.critical("{}.{}: Unmatched \"{}\"".format(
self.__class__.__name__, inspect.currentframe().f_code.co_name, inputDef))
def _populateTree(self, fileName):
BooleanOpTree.evaluatedOutputs.clear()
nodes = []
for definition in base.getInputLines(fileName):
node = self._turnDefinitionIntoTreeNode(definition)
if node and node.unresolvedInputs:
nodes.append(node)
nodeCount = len(nodes) + 1
while nodeCount > len(nodes):
nodeCount = len(nodes)
print("Starting loop: {} nodes.".format(nodeCount))
activeNodes = nodes[:]
nodes = []
for node in activeNodes:
node.evaluate()
if node.unresolvedInputs:
nodes.append(node)
self.evaluatedOutputs = {k: v for k, v in BooleanOpTree.evaluatedOutputs.items()}
print("evaluatedOutputs: ", sorted(BooleanOpTree.evaluatedOutputs))
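# Minimal usage sketch; the file name is a hypothetical placeholder. BinaryTreePlanter reads
# definitions via base.getInputLines and evaluates the whole tree during construction:
#
#   planter = BinaryTreePlanter("day7_input.txt")
#   print(planter.evaluatedOutputs.get("a"))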
|
the-stack_106_23979 | """
SecureTransport support for urllib3 via ctypes.
This makes platform-native TLS available to urllib3 users on macOS without the
use of a compiler. This is an important feature because the Python Package
Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
this is to give macOS users an alternative solution to the problem, and that
solution is to use SecureTransport.
We use ctypes here because this solution must not require a compiler. That's
because pip is not allowed to require a compiler either.
This is not intended to be a seriously long-term solution to this problem.
The hope is that PEP 543 will eventually solve this issue for us, at which
point we can retire this contrib module. But in the short term, we need to
solve the impending tire fire that is Python on Mac without this kind of
contrib module. So...here we are.
To use this module, simply import and inject it::
import urllib3.contrib.securetransport
urllib3.contrib.securetransport.inject_into_urllib3()
Happy TLSing!
This code is a bastardised version of the code found in Will Bond's oscrypto
library. An enormous debt is owed to him for blazing this trail for us. For
that reason, this code should be considered to be covered both by urllib3's
license and by oscrypto's:
Copyright (c) 2015-2016 Will Bond <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import contextlib
import ctypes
import errno
import os.path
import shutil
import socket
import ssl
import threading
import weakref
from .. import util
from ._securetransport.bindings import Security, SecurityConst, CoreFoundation
from ._securetransport.low_level import (
_assert_no_error,
_cert_array_from_pem,
_temporary_keychain,
_load_client_cert_chain,
)
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
# SNI always works
HAS_SNI = True
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
# This dictionary is used by the read callback to obtain a handle to the
# calling wrapped socket. This is a pretty silly approach, but for now it'll
# do. I feel like I should be able to smuggle a handle to the wrapped socket
# directly in the SSLConnectionRef, but for now this approach will work I
# guess.
#
# We need to lock around this structure for inserts, but we don't do it for
# reads/writes in the callbacks. The reasoning here goes as follows:
#
# 1. It is not possible to call into the callbacks before the dictionary is
# populated, so once in the callback the id must be in the dictionary.
# 2. The callbacks don't mutate the dictionary, they only read from it, and
# so cannot conflict with any of the insertions.
#
# This is good: if we had to lock in the callbacks we'd drastically slow down
# the performance of this code.
_connection_refs = weakref.WeakValueDictionary()
_connection_ref_lock = threading.Lock()
# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
# for no better reason than we need *a* limit, and this one is right there.
SSL_WRITE_BLOCKSIZE = 16384
# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
# individual cipher suites. We need to do this because this is how
# SecureTransport wants them.
CIPHER_SUITES = [
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_AES_256_GCM_SHA384,
SecurityConst.TLS_AES_128_GCM_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_AES_128_CCM_8_SHA256,
SecurityConst.TLS_AES_128_CCM_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
]
# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
# TLSv1 to 1.2 are supported on macOS 10.8+
_protocol_to_min_max = {
util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12)
}
if hasattr(ssl, "PROTOCOL_SSLv2"):
_protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
SecurityConst.kSSLProtocol2,
SecurityConst.kSSLProtocol2,
)
if hasattr(ssl, "PROTOCOL_SSLv3"):
_protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
SecurityConst.kSSLProtocol3,
SecurityConst.kSSLProtocol3,
)
if hasattr(ssl, "PROTOCOL_TLSv1"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
SecurityConst.kTLSProtocol1,
SecurityConst.kTLSProtocol1,
)
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
SecurityConst.kTLSProtocol11,
SecurityConst.kTLSProtocol11,
)
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
SecurityConst.kTLSProtocol12,
SecurityConst.kTLSProtocol12,
)
def inject_into_urllib3():
"""
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
"""
util.SSLContext = SecureTransportContext
util.ssl_.SSLContext = SecureTransportContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_SECURETRANSPORT = True
util.ssl_.IS_SECURETRANSPORT = True
def extract_from_urllib3():
"""
Undo monkey-patching by :func:`inject_into_urllib3`.
"""
util.SSLContext = orig_util_SSLContext
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_SECURETRANSPORT = False
util.ssl_.IS_SECURETRANSPORT = False
def _read_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
requested_length = data_length_pointer[0]
timeout = wrapped_socket.gettimeout()
error = None
read_count = 0
try:
while read_count < requested_length:
if timeout is None or timeout >= 0:
if not util.wait_for_read(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
remaining = requested_length - read_count
buffer = (ctypes.c_char * remaining).from_address(
data_buffer + read_count
)
chunk_size = base_socket.recv_into(buffer, remaining)
read_count += chunk_size
if not chunk_size:
if not read_count:
return SecurityConst.errSSLClosedGraceful
break
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = read_count
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = read_count
if read_count != requested_length:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal
def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
bytes_to_write = data_length_pointer[0]
data = ctypes.string_at(data_buffer, bytes_to_write)
timeout = wrapped_socket.gettimeout()
error = None
sent = 0
try:
while sent < bytes_to_write:
if timeout is None or timeout >= 0:
if not util.wait_for_write(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
chunk_sent = base_socket.send(data)
sent += chunk_sent
# This has some needless copying here, but I'm not sure there's
# much value in optimising this data path.
data = data[chunk_sent:]
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = sent
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = sent
if sent != bytes_to_write:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal
# We need to keep these two objects references alive: if they get GC'd while
# in use then SecureTransport could attempt to call a function that is in freed
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
class WrappedSocket(object):
"""
API-compatibility wrapper for Python's OpenSSL wrapped socket object.
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
collector of PyPy.
"""
def __init__(self, socket):
self.socket = socket
self.context = None
self._makefile_refs = 0
self._closed = False
self._exception = None
self._keychain = None
self._keychain_dir = None
self._client_cert_chain = None
# We save off the previously-configured timeout and then set it to
# zero. This is done because we use select and friends to handle the
# timeouts, but if we leave the timeout set on the lower socket then
# Python will "kindly" call select on that socket again for us. Avoid
# that by forcing the timeout to zero.
self._timeout = self.socket.gettimeout()
self.socket.settimeout(0)
@contextlib.contextmanager
def _raise_on_error(self):
"""
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions.
It also correctly forces the socket closed.
"""
self._exception = None
# We explicitly don't catch around this yield because in the unlikely
# event that an exception was hit in the block we don't want to swallow
# it.
yield
if self._exception is not None:
exception, self._exception = self._exception, None
self.close()
raise exception
def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
"""
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result)
def _custom_validate(self, verify, trust_bundle):
"""
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
"""
# If we disabled cert validation, just say: cool.
if not verify:
return
# We want data in memory, so load it up.
if os.path.isfile(trust_bundle):
with open(trust_bundle, "rb") as f:
trust_bundle = f.read()
cert_array = None
trust = Security.SecTrustRef()
try:
# Get a CFArray that contains the certs we want.
cert_array = _cert_array_from_pem(trust_bundle)
# Ok, now the hard part. We want to get the SecTrustRef that ST has
# created for this connection, shove our CAs into it, tell ST to
# ignore everything else it knows, and then ask if it can build a
# chain. This is a buuuunch of code.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
raise ssl.SSLError("Failed to copy trust reference")
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
_assert_no_error(result)
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
_assert_no_error(result)
trust_result = Security.SecTrustResultType()
result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
_assert_no_error(result)
finally:
if trust:
CoreFoundation.CFRelease(trust)
if cert_array is not None:
CoreFoundation.CFRelease(cert_array)
# Ok, now we can look at what the result was.
successes = (
SecurityConst.kSecTrustResultUnspecified,
SecurityConst.kSecTrustResultProceed,
)
if trust_result.value not in successes:
raise ssl.SSLError(
"certificate verify failed, error code: %d" % trust_result.value
)
def handshake(
self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase,
):
"""
Actually performs the TLS handshake. This is run automatically by
wrapped socket, and shouldn't be needed in user code.
"""
# First, we do the initial bits of connection setup. We need to create
# a context, set its I/O funcs, and set the connection reference.
self.context = Security.SSLCreateContext(
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
self.context, _read_callback_pointer, _write_callback_pointer
)
_assert_no_error(result)
# Here we need to compute the handle to use. We do this by taking the
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
# just keep incrementing by one until we find a free space.
with _connection_ref_lock:
handle = id(self) % 2147483647
while handle in _connection_refs:
handle = (handle + 1) % 2147483647
_connection_refs[handle] = self
result = Security.SSLSetConnection(self.context, handle)
_assert_no_error(result)
# If we have a server hostname, we should set that too.
if server_hostname:
if not isinstance(server_hostname, bytes):
server_hostname = server_hostname.encode("utf-8")
result = Security.SSLSetPeerDomainName(
self.context, server_hostname, len(server_hostname)
)
_assert_no_error(result)
# Setup the ciphers.
self._set_ciphers()
# Set the minimum and maximum TLS versions.
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
_assert_no_error(result)
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
_assert_no_error(result)
# If there's a trust DB, we need to use it. We do that by telling
# SecureTransport to break on server auth. We also do that if we don't
# want to validate the certs at all: we just won't actually do any
# authing in that case.
if not verify or trust_bundle is not None:
result = Security.SSLSetSessionOption(
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
)
_assert_no_error(result)
# If there's a client cert, we need to use it.
if client_cert:
self._keychain, self._keychain_dir = _temporary_keychain()
self._client_cert_chain = _load_client_cert_chain(
self._keychain, client_cert, client_key
)
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
_assert_no_error(result)
while True:
with self._raise_on_error():
result = Security.SSLHandshake(self.context)
if result == SecurityConst.errSSLWouldBlock:
raise socket.timeout("handshake timed out")
elif result == SecurityConst.errSSLServerAuthCompleted:
self._custom_validate(verify, trust_bundle)
continue
else:
_assert_no_error(result)
break
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, bufsiz):
buffer = ctypes.create_string_buffer(bufsiz)
bytes_read = self.recv_into(buffer, bufsiz)
data = buffer[:bytes_read]
return data
def recv_into(self, buffer, nbytes=None):
# Read short on EOF.
if self._closed:
return 0
if nbytes is None:
nbytes = len(buffer)
buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLRead(
self.context, buffer, nbytes, ctypes.byref(processed_bytes)
)
# There are some result codes that we want to treat as "not always
# errors". Specifically, those are errSSLWouldBlock,
# errSSLClosedGraceful, and errSSLClosedNoNotify.
if result == SecurityConst.errSSLWouldBlock:
# If we didn't process any bytes, then this was just a time out.
# However, we can get errSSLWouldBlock in situations when we *did*
# read some data, and in those cases we should just read "short"
# and return.
if processed_bytes.value == 0:
# Timed out, no data read.
raise socket.timeout("recv timed out")
elif result in (
SecurityConst.errSSLClosedGraceful,
SecurityConst.errSSLClosedNoNotify,
):
# The remote peer has closed this connection. We should do so as
# well. Note that we don't actually return here because in
# principle this could actually be fired along with return data.
# It's unlikely though.
self.close()
else:
_assert_no_error(result)
# Ok, we read and probably succeeded. We should return whatever data
# was actually read.
return processed_bytes.value
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def send(self, data):
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLWrite(
self.context, data, len(data), ctypes.byref(processed_bytes)
)
if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
# Timed out
raise socket.timeout("send timed out")
else:
_assert_no_error(result)
# We sent, and probably succeeded. Tell them how much we sent.
return processed_bytes.value
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
with self._raise_on_error():
Security.SSLClose(self.context)
def close(self):
# TODO: should I do clean shutdown here? Do I have to?
if self._makefile_refs < 1:
self._closed = True
if self.context:
CoreFoundation.CFRelease(self.context)
self.context = None
if self._client_cert_chain:
CoreFoundation.CFRelease(self._client_cert_chain)
self._client_cert_chain = None
if self._keychain:
Security.SecKeychainDelete(self._keychain)
CoreFoundation.CFRelease(self._keychain)
shutil.rmtree(self._keychain_dir)
self._keychain = self._keychain_dir = None
return self.socket.close()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
# Urgh, annoying.
#
# Here's how we do this:
#
# 1. Call SSLCopyPeerTrust to get hold of the trust object for this
# connection.
# 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
# 3. To get the CN, call SecCertificateCopyCommonName and process that
# string so that it's of the appropriate type.
# 4. To get the SAN, we need to do something a bit more complex:
# a. Call SecCertificateCopyValues to get the data, requesting
# kSecOIDSubjectAltName.
# b. Mess about with this dictionary to try to get the SANs out.
#
# This is gross. Really gross. It's going to be a few hundred LoC extra
# just to repeat something that SecureTransport can *already do*. So my
# operating assumption at this time is that what we want to do is
# instead to just flag to urllib3 that it shouldn't do its own hostname
# validation when using SecureTransport.
if not binary_form:
raise ValueError("SecureTransport only supports dumping binary certs")
trust = Security.SecTrustRef()
certdata = None
der_bytes = None
try:
# Grab the trust store.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
# Probably we haven't done the handshake yet. No biggie.
return None
cert_count = Security.SecTrustGetCertificateCount(trust)
if not cert_count:
# Also a case that might happen if we haven't handshaked.
# Handshook? Handshaken?
return None
leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
assert leaf
# Ok, now we want the DER bytes.
certdata = Security.SecCertificateCopyData(leaf)
assert certdata
data_length = CoreFoundation.CFDataGetLength(certdata)
data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
der_bytes = ctypes.string_at(data_buffer, data_length)
finally:
if certdata:
CoreFoundation.CFRelease(certdata)
if trust:
CoreFoundation.CFRelease(trust)
return der_bytes
def version(self):
protocol = Security.SSLProtocol()
result = Security.SSLGetNegotiatedProtocolVersion(
self.context, ctypes.byref(protocol)
)
_assert_no_error(result)
if protocol.value == SecurityConst.kTLSProtocol13:
raise ssl.SSLError("SecureTransport does not support TLS 1.3")
elif protocol.value == SecurityConst.kTLSProtocol12:
return "TLSv1.2"
elif protocol.value == SecurityConst.kTLSProtocol11:
return "TLSv1.1"
elif protocol.value == SecurityConst.kTLSProtocol1:
return "TLSv1"
elif protocol.value == SecurityConst.kSSLProtocol3:
return "SSLv3"
elif protocol.value == SecurityConst.kSSLProtocol2:
return "SSLv2"
else:
raise ssl.SSLError("Unknown TLS version: %r" % protocol)
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
def makefile(self, mode="r", buffering=None, *args, **kwargs):
# We disable buffering with SecureTransport because it conflicts with
# the buffering that ST does internally (see issue #1153 for more).
buffering = 0
return backport_makefile(self, mode, buffering, *args, **kwargs)
WrappedSocket.makefile = makefile
class SecureTransportContext(object):
"""
I am a wrapper class for the SecureTransport library, to translate the
interface of the standard library ``SSLContext`` object to calls into
SecureTransport.
"""
def __init__(self, protocol):
self._min_version, self._max_version = _protocol_to_min_max[protocol]
self._options = 0
self._verify = False
self._trust_bundle = None
self._client_cert = None
self._client_key = None
self._client_key_passphrase = None
@property
def check_hostname(self):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
return True
@check_hostname.setter
def check_hostname(self, value):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
pass
@property
def options(self):
# TODO: Well, crap.
#
# So this is the bit of the code that is the most likely to cause us
# trouble. Essentially we need to enumerate all of the SSL options that
# users might want to use and try to see if we can sensibly translate
# them, or whether we should just ignore them.
return self._options
@options.setter
def options(self, value):
# TODO: Update in line with above.
self._options = value
@property
def verify_mode(self):
return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
@verify_mode.setter
def verify_mode(self, value):
self._verify = True if value == ssl.CERT_REQUIRED else False
def set_default_verify_paths(self):
# So, this has to do something a bit weird. Specifically, what it does
# is nothing.
#
# This means that, if we had previously had load_verify_locations
# called, this does not undo that. We need to do that because it turns
# out that the rest of the urllib3 code will attempt to load the
# default verify paths if it hasn't been told about any paths, even if
        # the context itself was configured sometime earlier. We resolve that by just
# ignoring it.
pass
def load_default_certs(self):
return self.set_default_verify_paths()
def set_ciphers(self, ciphers):
# For now, we just require the default cipher string.
if ciphers != util.ssl_.DEFAULT_CIPHERS:
raise ValueError("SecureTransport doesn't support custom cipher strings")
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
# OK, we only really support cadata and cafile.
if capath is not None:
raise ValueError("SecureTransport does not support cert directories")
self._trust_bundle = cafile or cadata
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._client_cert = certfile
self._client_key = keyfile
        self._client_key_passphrase = password
def wrap_socket(
self,
sock,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None,
):
# So, what do we do here? Firstly, we assert some properties. This is a
# stripped down shim, so there is some functionality we don't support.
# See PEP 543 for the real deal.
assert not server_side
assert do_handshake_on_connect
assert suppress_ragged_eofs
# Ok, we're good to go. Now we want to create the wrapped socket object
# and store it in the appropriate place.
wrapped_socket = WrappedSocket(sock)
# Now we can handshake
wrapped_socket.handshake(
server_hostname,
self._verify,
self._trust_bundle,
self._min_version,
self._max_version,
self._client_cert,
self._client_key,
self._client_key_passphrase,
)
return wrapped_socket
|
the-stack_106_23981 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# Test bootloader behavior for threading code. The default behavior of Python interpreter is to wait for all threads
# before exiting the main process. Bootloader should behave in the same way.
import os
import sys
import threading
_OUT_EXPECTED = ['ONE', 'TWO', 'THREE']
# Code for the subprocess.
if 'PYI_THREAD_TEST_CASE' in os.environ:
class TestThreadClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
print('ONE')
print('TWO')
print('THREE')
# Main process should not exit before the thread stops. This is the behaviour of Python interpreter.
TestThreadClass().start()
# Execute itself in a subprocess.
else:
    # Differentiate subprocess code.
itself = sys.argv[0]
# Run subprocess.
import subprocess
# Preserve environment to avoid `Failed to initialize Windows random API (CryptoGen)`
env = dict(os.environ)
env['PYI_THREAD_TEST_CASE'] = 'yes'
proc = subprocess.Popen([itself], stdout=subprocess.PIPE, env=env, stderr=subprocess.PIPE, shell=False)
# Waits for subprocess to complete.
out, err = proc.communicate()
# Make output from subprocess visible.
print(out)
out = out.decode('ascii')
print(out)
# Remove empty lines from output.
out = out.strip().splitlines()
for line in out:
if not line.strip(): # Empty line detected.
out.remove(line)
# Check output.
if out != _OUT_EXPECTED:
print(" +++++++ SUBPROCESS ERROR OUTPUT +++++++")
print(err)
raise SystemExit(
'Subprocess did not print ONE, TWO, THREE in correct order. (output was %r, return code was %s)' %
(out, proc.returncode)
)
|
the-stack_106_23982 | from tkinter import *
class Calc:
def __init__(self):
self.window = Tk()
self.window.title("Calc")
        self.window.resizable(0, 0)  # Lock the window size to match the widgets.
self.screen_numbers = Entry(self.window, justify="center", font="arial 20 bold", bg="#D9525E", fg="white", width=26)
self.screen_numbers.pack()
self.frame = Frame(self.window)
self.frame.pack()
color_button_number = "#175073"
color_function_number = "#2E8C83"
self.button_1 = Button(self.frame, bg=color_button_number, bd=0, text="1", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("1"))
self.button_2 = Button(self.frame, bg=color_button_number, bd=0, text="2", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("2"))
self.button_3 = Button(self.frame, bg=color_button_number, bd=0, text="3", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("3"))
self.button_4 = Button(self.frame, bg=color_button_number, bd=0, text="4", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("4"))
self.button_5 = Button(self.frame, bg=color_button_number, bd=0, text="5", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("5"))
self.button_6 = Button(self.frame, bg=color_button_number, bd=0, text="6", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("6"))
self.button_7 = Button(self.frame, bg=color_button_number, bd=0, text="7", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("7"))
self.button_8 = Button(self.frame, bg=color_button_number, bd=0, text="8", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("8"))
self.button_9 = Button(self.frame, bg=color_button_number, bd=0, text="9", font="arial 20 bold", fg="white",
width=5, height=3, command=lambda: self.touch("9"))
self.button_increase = Button(self.frame, bg=color_function_number, bd=0, text="+", font="arial 20 bold",
fg="white", width=5, height=3, command=lambda: self.touch("+"))
self.button_decrease = Button(self.frame, bg=color_function_number, bd=0, text="-", font="arial 20 bold",
fg="white", width=5, height=3, command=lambda: self.touch("-"))
self.button_division = Button(self.frame, bg=color_function_number, bd=0, text="/", font="arial 20 bold",
fg="white", width=5, height=3, command=lambda: self.touch("/"))
self.button_multi = Button(self.frame, bg=color_function_number, bd=0, text="x", font="arial 20 bold",
fg="white", width=5, height=3, command=lambda: self.touch("*"))
self.button_equal = Button(self.frame, bg=color_function_number, bd=0, text="=", font="arial 20 bold",
fg="white", width=12, height=3, command=self.button_total)
self.button_clean = Button(self.frame, bg=color_function_number, bd=0, text="C", font="arial 20 bold",
fg="white", width=5, height=3, command=self.button_clean)
self.button_1.grid(row=0, column=0)
self.button_2.grid(row=0, column=1)
self.button_3.grid(row=0, column=2)
self.button_division.grid(row=0, column=3)
self.button_4.grid(row=1, column=0)
self.button_5.grid(row=1, column=1)
self.button_6.grid(row=1, column=2)
self.button_multi.grid(row=1, column=3)
self.button_7.grid(row=2, column=0)
self.button_8.grid(row=2, column=1)
self.button_9.grid(row=2, column=2)
self.button_decrease.grid(row=2, column=3)
self.button_increase.grid(row=3, column=3)
self.button_clean.grid(row=3, column=2)
self.button_equal.grid(row=3, column=0, columnspan=2)
self.window.mainloop()
def touch(self, num):
self.screen_numbers.insert(END, num)
def button_clean(self):
self.screen_numbers.delete(0, END)
def button_total(self):
t = eval(self.screen_numbers.get())
self.screen_numbers.delete(0, END)
self.screen_numbers.insert(0, str(t))
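# Minimal entry point sketch: launch the calculator only when this file is executed directly,
# so importing the module stays free of side effects. Calc() builds the window and enters
# mainloop() from its __init__.
if __name__ == "__main__":
    Calc()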
|
the-stack_106_23984 | from CybORG import CybORG
import inspect
from CybORG.Agents.SimpleAgents.BlueMonitorAgent import BlueMonitorAgent
from CybORG.Agents.SimpleAgents.KeyboardAgent import KeyboardAgent
from CybORG.Agents.Wrappers.RedTableWrapper import RedTableWrapper
from CybORG.Agents import TestAgent
from CybORG.Agents.Wrappers.FixedFlatWrapper import FixedFlatWrapper
from CybORG.Agents.Wrappers.IntListToAction import IntListToActionWrapper
from CybORG.Agents.Wrappers.OpenAIGymWrapper import OpenAIGymWrapper
if __name__ == "__main__":
print("Setup")
path = str(inspect.getfile(CybORG))
path = path[:-10] + '/Shared/Scenarios/Scenario1b.yaml'
cyborg = RedTableWrapper(env=CybORG(path, 'sim',agents={'Blue':BlueMonitorAgent}), output_mode='table')
agent_name = 'Red'
#cyborg = RedTableWrapper(env=IntListToActionWrapper(FixedFlatWrapper(CybORG(path, 'sim'))), output_mode="table")
results = cyborg.reset(agent=agent_name)
observation = results.observation
action_space = results.action_space
agent = KeyboardAgent()
reward = 0
done = False
while True:
print("before:", action_space)
action = agent.get_action(observation, action_space)
results = cyborg.step(agent=agent_name, action=action)
reward += results.reward
observation = results.observation
print("after")
action_space = results.action_space
print(action_space)
break
if done:
print(f"Game Over. Total reward: {reward}")
break
|
the-stack_106_23986 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.script import Script
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
class TezPreUpgrade(Script):
def prepare(self, env):
"""
During the "Upgrade" direction of a Stack Upgrade, it is necessary to ensure that the older tez tarball
has been copied to HDFS. This is an additional check for added robustness.
"""
import params
env.set_params(params)
Logger.info("Before starting Stack Upgrade, check if tez tarball has been copied to HDFS.")
if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
Logger.info("Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS.".format(params.stack_version_formatted))
# Force it to copy the current version of the tez tarball, rather than the version the RU will go to.
resource_created = copy_to_hdfs(
"tez",
params.user_group,
params.hdfs_user,
use_upgrading_version_during_upgrade=False,
host_sys_prepped=params.host_sys_prepped)
if resource_created:
params.HdfsResource(None, action="execute")
else:
raise Fail("Could not copy tez tarball to HDFS.")
if __name__ == "__main__":
TezPreUpgrade().execute()
|
the-stack_106_23988 | from itertools import cycle
from matplotlib import pyplot as plt
from matplotlib import ticker
from ._typing import (
Axes,
Iterable,
Opt_Iter_Float,
Opt_Iter_Str,
Opt_Plot,
Optional,
Plot,
Sequence,
TypeVar,
)
from .orbitals._base_orbital import BaseOrbital
def plotter(
orbitals: Iterable[BaseOrbital],
title: Optional[str] = None,
style: Optional[str] = None,
plot: Opt_Plot = None,
xlim: Optional[tuple[float, float]] = None,
xticks: Optional[tuple[float, float]] = None,
xticks_minor: Iterable | bool = True,
xlabel: Optional[str] = None,
ylim: Optional[tuple[float, float]] = None,
yticks: Optional[Iterable] = None,
yticks_minor: Iterable | bool = True,
ylabel: Optional[str] = None,
labels: Opt_Iter_Str = None,
colors: Opt_Iter_Str = None,
alphas: Opt_Iter_Float = None,
markers: Opt_Iter_Str = None,
linestyles: Opt_Iter_Str = None,
linewidths: Opt_Iter_Float = None,
legend: bool = True,
savefig: Optional[str] = None,
) -> Plot:
"""
Plot an iterable of Orbitals
:param orbitals: orbitals to plot
:param title: title of the plot
:param style: plot-style (e.g. ???), if None, generated by type(orbitals[0]).__name__
:param plot: where to plot, generates new figure if None
:param x*: x-axis setup parameters
:param y*: y-axis setup parameters
:param labels: labels for the orbitals, if None, str(orbital)
:param colors: colors to plot the orbitals
:param alphas: transparency settings to use
:param markers: markers to plot the orbitals
:param linestyles: linestyles to plot the orbitals
:param linewidths: linewidths to plot the orbitals
:param legend: whether to plot a legend
:param savefig: where to save the figure
:return: figure and axes
"""
pass
def plot_orbitals(
orbitals: Sequence[BaseOrbital],
style: str,
ax: Axes,
labels: Opt_Iter_Str = None,
colors: Opt_Iter_Str = None,
alphas: Opt_Iter_Float = None,
markers: Opt_Iter_Str = None,
linestyles: Opt_Iter_Str = None,
linewidths: Opt_Iter_Float = None,
):
"""
Plot Orbitals on an axis.
:param orbitals: the Orbitals to be plotted
:param ax: the axis on which to plot
:param style: the plot style
:param labels: labels for the Orbitals, if None, str(Orbital)
:param colors: the colors to use
:param alphas: transparency settings to use
:param markers: the markers to use at each point on the plot
:param linestyles: the styles of line to use
:param linewidths: the widths of line to use
"""
pass
def plot_orbital(
orbital: BaseOrbital,
style: str,
ax: Axes,
label: Optional[str] = None,
color: Optional[str] = None,
marker: Optional[str] = None,
linestyle: Optional[str] = None,
linewidth: Optional[float] = None,
alpha: Optional[float] = None,
):
"""
Plot an Orbital on an axis.
:param orbital: the orbital to be plotted
:param style: the plot style; if None, generates based on the type(Orbital)
:param ax: the axis on which to plot
:param label: label for the Orbital; if None, str(Orbital)
:param color: the color to use
:param marker: the marker to use at each point on the plot
:param linestyle: the style of line to use
:param linewidth: the width of line to use
:param alpha: transparency setting
:param peaks: peak highlighting parameters
"""
pass
def subplots(style: str, *args, setup_axis_kw: Optional[dict] = None, **kwargs) -> Plot:
"""
    Make a (non-squeezed) grid of subplots.
"""
kwargs["squeeze"] = False
if "sharex" not in kwargs:
kwargs["sharex"] = True
if "sharey" not in kwargs:
kwargs["sharey"] = True
gridspec_defaults = {
"hspace": 0,
"wspace": 0,
}
gridspec_kw = kwargs["gridspec_kw"] if "gridspec_kw" in kwargs else {}
kwargs["gridspec_kw"] = gridspec_defaults | gridspec_kw
fig, axes = plt.subplots(*args, **kwargs)
setup_axis_kw = setup_axis_kw if setup_axis_kw else {}
setup_axis(axes, style, **setup_axis_kw)
for i, sub_ax in enumerate(axes):
for j, ax in enumerate(sub_ax):
if i != len(axes) - 1:
ax.set_xlabel(None)
if j:
ax.set_ylabel(None)
return fig, axes
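# Illustrative call; the grid shape and figure size are arbitrary example values, and the
# style string must be one of the names recognised by setup_axis below (e.g. "EnergyOrbital"):
#
#   fig, axes = subplots("EnergyOrbital", 2, 2, figsize=(8, 6))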
def setup_axis( # noqa: C901
ax: Iterable | Axes,
style: Optional[str] = None,
title: Optional[str] = None,
xlim: Optional[tuple[float, float]] = None,
xticks: Optional[tuple[float, float]] = None,
xticks_minor: Iterable | bool = True,
xlabel: Optional[str] = None,
ylim: Optional[tuple[float, float]] = None,
yticks: Optional[tuple[float, float]] = None,
yticks_minor: Iterable | bool = True,
ylabel: Optional[str] = None,
):
"""
Setup the axis labels and limits.
Autogenerates based on style for any variable set to None.
:param ax: axis to setup
:param style: style to use
:param title: title of the axis
:param *lim: limits for *-axis values
:param *ticks: *-axis ticks
:param *ticks_minor: *-axis minor ticks
:param *label: label for the *-axis
"""
if not isinstance(ax, Axes):
for sub_ax in ax:
setup_axis(
sub_ax,
style,
title,
xlim,
xticks,
xticks_minor,
xlabel,
ylim,
yticks,
yticks_minor,
ylabel,
)
else:
# Update values that are None
up = lambda v, d: d if v is None else v
if style:
if style == "BaseOrbital":
pass
elif style == "EnergyOrbital":
ylabel = up(ylabel, "Energy")
elif style == "ComboOrbital":
pass
elif style == "ComboEnergyOrbital":
ylabel = up(ylabel, "Energy")
elif style == "OrbitalGroup":
pass
elif style == "EnergyOrbitalGroup":
ylabel = up(ylabel, "Energy")
elif style == "ComboOrbitalGroup":
pass
elif style == "ComboEnergyOrbitalGroup":
ylabel = up(ylabel, "Energy")
else:
raise NotImplementedError(
f"{style=} is not yet implemented, buy a developer a coffee."
)
ax.set_title(title)
if xticks is not None:
ax.set_xticks(xticks)
if xticks_minor is True:
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
elif xticks_minor is not None:
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(xticks_minor))
if yticks is not None:
ax.set_yticks(yticks)
if yticks_minor is True:
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
elif yticks_minor is not None:
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator(yticks_minor))
if xlim is not None:
ax.set_xlim(*xlim)
if ylim is not None:
ax.set_ylim(*ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
T = TypeVar("T")
def cycle_values(values: Iterable[T] | T) -> Iterable[T]:
"""
Make a cycle iterator of values.
:param values: a value or list of values to be cycled.
:return: iterator of cycled values
"""
if not isinstance(values, Iterable):
values = [values]
yield from cycle(values)
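# Doctest-style illustration (not executed by this module):
#
#   >>> from itertools import islice
#   >>> list(islice(cycle_values(["r", "g"]), 5))
#   ['r', 'g', 'r', 'g', 'r']
#   >>> list(islice(cycle_values(1.0), 3))
#   [1.0, 1.0, 1.0]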
|
the-stack_106_23990 | import os
import cv2
import numpy as np
import pandas as pd
import albumentations
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
class MelanomaDataset(Dataset):
def __init__(self, csv, mode, meta_features, transform=None):
self.csv = csv.reset_index(drop=True)
self.mode = mode
self.use_meta = meta_features is not None
self.meta_features = meta_features
self.transform = transform
def __len__(self):
return self.csv.shape[0]
def __getitem__(self, index):
row = self.csv.iloc[index]
image = cv2.imread(row.filepath)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if self.transform is not None:
res = self.transform(image=image)
image = res['image'].astype(np.float32)
else:
image = image.astype(np.float32)
image = image.transpose(2, 0, 1)
if self.use_meta:
data = (torch.tensor(image).float(), torch.tensor(self.csv.iloc[index][self.meta_features]).float())
else:
data = torch.tensor(image).float()
if self.mode == 'test':
return data
else:
return data, torch.tensor(self.csv.iloc[index].target).long()
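# Illustrative wiring sketch; batch size, worker count and image size are arbitrary example
# values, while df_train / meta_features come from get_df below and the transforms from
# get_transforms:
#
#   transforms_train, transforms_val = get_transforms(image_size=512)
#   dataset_train = MelanomaDataset(df_train, 'train', meta_features, transform=transforms_train)
#   loader_train = torch.utils.data.DataLoader(dataset_train, batch_size=64,
#                                              shuffle=True, num_workers=4)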
def get_transforms(image_size):
transforms_train = albumentations.Compose([
albumentations.Transpose(p=0.5),
albumentations.VerticalFlip(p=0.5),
albumentations.HorizontalFlip(p=0.5),
albumentations.RandomBrightness(limit=0.2, p=0.75),
albumentations.RandomContrast(limit=0.2, p=0.75),
albumentations.OneOf([
albumentations.MotionBlur(blur_limit=5),
albumentations.MedianBlur(blur_limit=5),
albumentations.GaussianBlur(blur_limit=5),
albumentations.GaussNoise(var_limit=(5.0, 30.0)),
], p=0.7),
albumentations.OneOf([
albumentations.OpticalDistortion(distort_limit=1.0),
albumentations.GridDistortion(num_steps=5, distort_limit=1.),
albumentations.ElasticTransform(alpha=3),
], p=0.7),
albumentations.CLAHE(clip_limit=4.0, p=0.7),
albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
albumentations.Resize(image_size, image_size),
albumentations.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
albumentations.Normalize()
])
transforms_val = albumentations.Compose([
albumentations.Resize(image_size, image_size),
albumentations.Normalize()
])
return transforms_train, transforms_val
def get_meta_data(df_train, df_test):
# One-hot encoding of anatom_site_general_challenge feature
concat = pd.concat([df_train['anatom_site_general_challenge'], df_test['anatom_site_general_challenge']], ignore_index=True)
dummies = pd.get_dummies(concat, dummy_na=True, dtype=np.uint8, prefix='site')
df_train = pd.concat([df_train, dummies.iloc[:df_train.shape[0]]], axis=1)
df_test = pd.concat([df_test, dummies.iloc[df_train.shape[0]:].reset_index(drop=True)], axis=1)
# Sex features
df_train['sex'] = df_train['sex'].map({'male': 1, 'female': 0})
df_test['sex'] = df_test['sex'].map({'male': 1, 'female': 0})
df_train['sex'] = df_train['sex'].fillna(-1)
df_test['sex'] = df_test['sex'].fillna(-1)
# Age features
df_train['age_approx'] /= 90
df_test['age_approx'] /= 90
df_train['age_approx'] = df_train['age_approx'].fillna(0)
df_test['age_approx'] = df_test['age_approx'].fillna(0)
df_train['patient_id'] = df_train['patient_id'].fillna(0)
# n_image per user
df_train['n_images'] = df_train.patient_id.map(df_train.groupby(['patient_id']).image_name.count())
df_test['n_images'] = df_test.patient_id.map(df_test.groupby(['patient_id']).image_name.count())
df_train.loc[df_train['patient_id'] == -1, 'n_images'] = 1
df_train['n_images'] = np.log1p(df_train['n_images'].values)
df_test['n_images'] = np.log1p(df_test['n_images'].values)
# image size
train_images = df_train['filepath'].values
train_sizes = np.zeros(train_images.shape[0])
for i, img_path in enumerate(tqdm(train_images)):
train_sizes[i] = os.path.getsize(img_path)
df_train['image_size'] = np.log(train_sizes)
test_images = df_test['filepath'].values
test_sizes = np.zeros(test_images.shape[0])
for i, img_path in enumerate(tqdm(test_images)):
test_sizes[i] = os.path.getsize(img_path)
df_test['image_size'] = np.log(test_sizes)
meta_features = ['sex', 'age_approx', 'n_images', 'image_size'] + [col for col in df_train.columns if col.startswith('site_')]
n_meta_features = len(meta_features)
return df_train, df_test, meta_features, n_meta_features
def get_df(kernel_type, out_dim, data_dir, data_folder, use_meta):
# 2020 data
df_train = pd.read_csv(os.path.join(data_dir, f'jpeg-melanoma-{data_folder}x{data_folder}', 'train.csv'))
df_train = df_train[df_train['tfrecord'] != -1].reset_index(drop=True)
df_train['filepath'] = df_train['image_name'].apply(lambda x: os.path.join(data_dir, f'jpeg-melanoma-{data_folder}x{data_folder}/train', f'{x}.jpg'))
if 'newfold' in kernel_type:
tfrecord2fold = {
8:0, 5:0, 11:0,
7:1, 0:1, 6:1,
10:2, 12:2, 13:2,
9:3, 1:3, 3:3,
14:4, 2:4, 4:4,
}
elif 'oldfold' in kernel_type:
tfrecord2fold = {i: i % 5 for i in range(15)}
else:
tfrecord2fold = {
2:0, 4:0, 5:0,
1:1, 10:1, 13:1,
0:2, 9:2, 12:2,
3:3, 8:3, 11:3,
6:4, 7:4, 14:4,
}
df_train['fold'] = df_train['tfrecord'].map(tfrecord2fold)
df_train['is_ext'] = 0
# 2018, 2019 data (external data)
df_train2 = pd.read_csv(os.path.join(data_dir, f'jpeg-isic2019-{data_folder}x{data_folder}', 'train.csv'))
df_train2 = df_train2[df_train2['tfrecord'] >= 0].reset_index(drop=True)
df_train2['filepath'] = df_train2['image_name'].apply(lambda x: os.path.join(data_dir, f'jpeg-isic2019-{data_folder}x{data_folder}/train', f'{x}.jpg'))
if 'newfold' in kernel_type:
df_train2['tfrecord'] = df_train2['tfrecord'] % 15
df_train2['fold'] = df_train2['tfrecord'].map(tfrecord2fold)
else:
df_train2['fold'] = df_train2['tfrecord'] % 5
df_train2['is_ext'] = 1
# Preprocess Target
df_train['diagnosis'] = df_train['diagnosis'].apply(lambda x: x.replace('seborrheic keratosis', 'BKL'))
df_train['diagnosis'] = df_train['diagnosis'].apply(lambda x: x.replace('lichenoid keratosis', 'BKL'))
df_train['diagnosis'] = df_train['diagnosis'].apply(lambda x: x.replace('solar lentigo', 'BKL'))
df_train['diagnosis'] = df_train['diagnosis'].apply(lambda x: x.replace('lentigo NOS', 'BKL'))
df_train['diagnosis'] = df_train['diagnosis'].apply(lambda x: x.replace('cafe-au-lait macule', 'unknown'))
df_train['diagnosis'] = df_train['diagnosis'].apply(lambda x: x.replace('atypical melanocytic proliferation', 'unknown'))
if out_dim == 9:
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('NV', 'nevus'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('MEL', 'melanoma'))
elif out_dim == 4:
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('NV', 'nevus'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('MEL', 'melanoma'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('DF', 'unknown'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('AK', 'unknown'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('SCC', 'unknown'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('VASC', 'unknown'))
df_train2['diagnosis'] = df_train2['diagnosis'].apply(lambda x: x.replace('BCC', 'unknown'))
else:
raise NotImplementedError()
# concat train data
df_train = pd.concat([df_train, df_train2]).reset_index(drop=True)
# test data
df_test = pd.read_csv(os.path.join(data_dir, f'jpeg-melanoma-{data_folder}x{data_folder}', 'test.csv'))
df_test['filepath'] = df_test['image_name'].apply(lambda x: os.path.join(data_dir, f'jpeg-melanoma-{data_folder}x{data_folder}/test', f'{x}.jpg'))
if use_meta:
df_train, df_test, meta_features, n_meta_features = get_meta_data(df_train, df_test)
else:
meta_features = None
n_meta_features = 0
# class mapping
diagnosis2idx = {d: idx for idx, d in enumerate(sorted(df_train.diagnosis.unique()))}
df_train['target'] = df_train['diagnosis'].map(diagnosis2idx)
mel_idx = diagnosis2idx['melanoma']
return df_train, df_test, meta_features, n_meta_features, mel_idx
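# Illustrative call; the kernel_type string and directory layout are hypothetical example
# values (data_folder matches the jpeg-melanoma-{size}x{size} folder naming used above):
#
#   df_train, df_test, meta_features, n_meta_features, mel_idx = get_df(
#       kernel_type='b4ns_768_newfold', out_dim=9, data_dir='./data',
#       data_folder=768, use_meta=True)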
|
the-stack_106_23993 | # -*- coding: utf-8 -*-
"""Tests for the replace script and ReplaceRobot class."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 04d8c30bad95dfbaedc34bdd259c60cecdacf082 $'
#
import os
import pywikibot
from pywikibot import fixes
from scripts import replace
from tests import _data_dir
from tests.aspects import unittest
from tests.bot_tests import TWNBotTestCase
# Load only the custom fixes
fixes.fixes.clear()
fixes._load_file(os.path.join(_data_dir, 'fixes.py'))
class TestReplacementsMain(TWNBotTestCase):
"""Test various calls of main()."""
SUMMARY_CONFIRMATION = (
'Press Enter to use this automatic message, or enter a '
'description of the\nchanges your bot will make:')
family = 'test'
code = 'test'
cached = False
def setUp(self):
"""Replace the original bot class with a fake one."""
class FakeReplaceBot(replace.ReplaceRobot):
"""A fake bot class for the minimal support."""
changed_pages = -42 # show that weird number to show this was used
def __init__(inner_self, *args, **kwargs):
# Unpatch already here, as otherwise super calls will use
# this class' super which is the class itself
replace.ReplaceRobot = self._original_bot
super(FakeReplaceBot, inner_self).__init__(*args, **kwargs)
self.bots.append(inner_self)
def run(inner_self):
"""Nothing to do here."""
inner_self.changed_pages = -47 # show that run was called
def patched_login(sysop=False):
"""Do nothing."""
pass
def patched_site(*args, **kwargs):
"""Patching a Site instance replacing it's login."""
site = self._original_site(*args, **kwargs)
site.login = patched_login
return site
super(TestReplacementsMain, self).setUp()
self._original_bot = replace.ReplaceRobot
self._original_input = replace.pywikibot.input
self._original_site = replace.pywikibot.Site
self.bots = []
self.inputs = []
replace.ReplaceRobot = FakeReplaceBot
replace.pywikibot.input = self._fake_input
replace.pywikibot.Site = patched_site
def tearDown(self):
"""Bring back the old bot class."""
replace.ReplaceRobot = self._original_bot
replace.pywikibot.input = self._original_input
replace.pywikibot.Site = self._original_site
super(TestReplacementsMain, self).tearDown()
def _fake_input(self, message):
"""Cache the message and return static text "TESTRUN"."""
self.inputs.append(message)
return 'TESTRUN'
def _run(self, *args):
"""Run the L{replace.main} with the given args and summary and page."""
# -page to not have an empty generator
# -lang and -family as it will use Site() otherwise
return replace.main(*(args + ('-lang:test', '-family:test',
'-page:TEST')))
def test_invalid_replacements(self):
"""Test invalid command line replacement configurations."""
# old and new need to be together
self.assertFalse(self._run('foo', '-replacementfile:/dev/null', 'bar'))
# only old provided
self.assertFalse(self._run('foo'))
# In the end no bots should've been created
self.assertFalse(self.bots)
def _test_replacement(self, replacement, clazz=replace.Replacement,
offset=0):
"""Test a replacement from the command line."""
self.assertIsInstance(replacement, clazz)
self.assertEqual(replacement.old, str(offset * 2 + 1))
if not callable(replacement.new):
self.assertEqual(replacement.new, str(offset * 2 + 2))
def _test_fix_replacement(self, replacement, length=1, offset=0, msg=False):
"""Test a replacement from a fix."""
assert length > offset
self._test_replacement(replacement, replace.ReplacementListEntry,
offset)
if msg:
self.assertEqual(replacement.edit_summary,
'M{0}'.format(offset + 1))
else:
self.assertIs(replacement.edit_summary,
replacement.fix_set.edit_summary)
self.assertIs(replacement.fix_set, replacement.container)
self.assertIsInstance(replacement.fix_set, replace.ReplacementList)
self.assertIsInstance(replacement.fix_set, list)
self.assertIn(replacement, replacement.fix_set)
self.assertIs(replacement, replacement.fix_set[offset])
self.assertEqual(len(replacement.fix_set), length)
def _get_bot(self, only_confirmation, *args):
"""Run with arguments, assert and return one bot."""
self.assertIsNone(self._run(*args))
self.assertEqual(len(self.bots), 1)
bot = self.bots[0]
if only_confirmation is not None:
self.assertIn(self.SUMMARY_CONFIRMATION, self.inputs)
if only_confirmation is True:
self.assertEqual(len(self.inputs), 1)
else:
self.assertNotIn(self.SUMMARY_CONFIRMATION, self.inputs)
self.assertEqual(bot.site, self.site)
self.assertEqual(bot.changed_pages, -47)
return bot
def _apply(self, bot, expected, missing=None, title='Test page'):
"""Test applying a test change."""
applied = set()
if missing is True:
required_applied = set()
else:
required_applied = set(bot.replacements)
if missing:
required_applied -= set(missing)
# shouldn't be edited anyway
page = pywikibot.Page(self.site, title)
self.assertEqual(expected,
bot.apply_replacements('Hello 1', applied, page))
self.assertEqual(applied, required_applied)
self.assertEqual(expected, bot.doReplacements('Hello 1', page))
def test_only_cmd(self):
"""Test command line replacements only."""
bot = self._get_bot(True, '1', '2')
self.assertEqual(len(bot.replacements), 1)
self._test_replacement(bot.replacements[0])
def test_cmd_automatic(self):
"""Test command line replacements with automatic summary."""
bot = self._get_bot(None, '1', '2', '-automaticsummary')
self.assertEqual(len(bot.replacements), 1)
self._test_replacement(bot.replacements[0])
self.assertEqual(self.inputs, [])
def test_only_fix_global_message(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:has-msg')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_global_message_tw(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:has-msg-tw')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_no_message(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:no-msg')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_all_replacement_summary(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:all-repl-msg')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0], msg=True)
def test_only_fix_partial_replacement_summary(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:partial-repl-msg')
for offset, replacement in enumerate(bot.replacements):
self._test_fix_replacement(replacement, 2, offset, offset == 0)
self.assertEqual(len(bot.replacements), 2)
def test_only_fix_multiple(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:has-msg-multiple')
for offset, replacement in enumerate(bot.replacements):
self._test_fix_replacement(replacement, 3, offset)
self.assertEqual(len(bot.replacements), 3)
def test_cmd_and_fix(self):
"""Test command line and fix replacements together."""
bot = self._get_bot(True, '1', '2', '-fix:has-msg')
self.assertEqual(len(bot.replacements), 2)
self._test_replacement(bot.replacements[0])
self._test_fix_replacement(bot.replacements[1])
def test_except_title(self):
"""Test excepting and requiring a title specific to fix."""
bot = self._get_bot(True, '-fix:no-msg-title-exceptions')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
self.assertIn('title', bot.replacements[0].exceptions)
self.assertIn('require-title', bot.replacements[0].exceptions)
self._apply(bot, 'Hello 1', missing=True, title='Neither')
self._apply(bot, 'Hello 2', title='Allowed')
self._apply(bot, 'Hello 1', missing=True, title='Allowed Declined')
def test_fix_callable(self):
"""Test fix replacements using a callable."""
bot = self._get_bot(True, '-fix:no-msg-callable')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
self.assertTrue(callable(bot.replacements[0].new))
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
the-stack_106_23995 | import collections
import sys
import pip
import json
import changelogs
import urllib.request
from packaging import version
from urllib.error import HTTPError, URLError
from distutils.version import LooseVersion
SECURITY_NOTICE_KEYWORDS = [
'security', 'vulnerability', 'cve', 'xss', 'sql injection',
]
DISPLAY_TABLE_LABELS = {
'status': 'Status',
'package': 'Package',
'installed': 'Installed',
'latest': 'Latest',
'versions': 'Available Updates',
'notices': 'Security Notices',
}
class PackageVersion(LooseVersion):
def _cmp(self, other):
try:
v1 = version.parse(self.vstring)
v2 = version.parse(str(other))
if v1 < v2:
return -1
elif v1 > v2:
return 1
else:
return 0
except:
return super(PackageVersion, self)._cmp(other)
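# Note added for clarity (not part of the original module): PackageVersion defers to
# packaging.version so comparisons follow PEP 440 instead of LooseVersion's purely
# lexical splitting. For example, version.parse("1.0rc1") < version.parse("1.0")
# (pre-releases sort before the final release), whereas LooseVersion places "1.0rc1"
# after "1.0".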
def get_pkg_info(package):
f = urllib.request.urlopen(
'https://pypi.python.org/pypi/%s/json' % package)
    return json.loads(f.read().decode('utf-8'))
def get_versions(package):
return [(version, package) for version, package in sorted(
package['releases'].items(), key=lambda k: PackageVersion(k[0]), reverse=True)]
def get_version_range(package, installed_version):
compare_version = LooseVersion(installed_version)
return [(version, package) for version, package in get_versions(package)
if PackageVersion(version) > compare_version]
def get_latest_version(package):
versions = get_versions(package)
return versions[0][0] if len(versions) else None
def get_version_diff(package, version_range):
logs = get_changelogs(package.project_name)
versions = []
for version, version_package in version_range:
for release_version, changelog in logs:
if release_version == version and PackageVersion(package.version) < PackageVersion(version):
versions.append((version, changelog))
return versions if len(versions) else []
def get_pkg_security_releases(version_diff):
versions = []
for version, changelog in version_diff:
if _string_contains_security_keywords(changelog):
versions.append((version, changelog))
return versions if len(versions) else None
def get_changelogs(package_name):
versions = changelogs.get(package_name, vendor='pypi')
ret = [(v, versions[v])
for v in sorted(versions.keys(), key=PackageVersion, reverse=True)]
return ret
def get_updates(package, changelog_scan=True):
ret = {
'package': package.project_name,
'installed': package.version,
'latest': None,
}
try:
pkg = get_pkg_info(package.project_name)
latest = get_latest_version(pkg)
if latest:
ret['latest'] = latest
            if PackageVersion(latest) > PackageVersion(package.version):
version_range = get_version_range(pkg, package.version)
for version, version_info in version_range:
ret['versions'] = ', '.join(
[version for version, version_info in version_range])
if changelog_scan:
ret['changelogs'] = {}
version_diff = get_version_diff(package, version_range)
for diff_version, diff_changelog in version_diff:
ret['changelogs'][diff_version] = [line.strip() for line in diff_changelog.strip(
).split("\n") if len(line.replace('-', '').strip()) > 0]
sec_releases = get_pkg_security_releases(version_diff)
if sec_releases is not None:
ret['notices'] = ', '.join(
['<%s' % sec_version for sec_version, sec_changelog in sec_releases])
except (HTTPError, URLError) as e:
ret['error'] = str(e)
return ret
def show_updates(changelog_scan=True, all_packages=False, json_out=False, filter_packages=[]):
packages = sorted(pip.get_installed_distributions(),
key=lambda pkg: pkg.project_name.lower())
packages_total = len(packages)
if filter_packages:
packages = filter(
lambda pkg: pkg.project_name in filter_packages, packages)
packages_total = len(filter_packages)
packages_progress = 0
updates = []
if not json_out:
_display_progress(packages_total)
for p in packages:
updates.append(get_updates(p, changelog_scan=changelog_scan))
packages_progress += 1
if not json_out:
_display_progress(packages_total, packages_progress)
if json_out:
sys.stdout.write(json.dumps(updates, indent=4))
else:
_display_table(updates, show_notices=changelog_scan, show_all_packages=all_packages)
def _string_contains_security_keywords(string):
lower = string.lower()
for keyword in SECURITY_NOTICE_KEYWORDS:
if keyword in lower:
return True
return False
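# Illustrative example (added comment; the CVE id is a placeholder): a changelog line
# such as "Fixed CVE-XXXX-YYYY in the auth module" contains the 'cve' keyword above,
# so that release would be flagged as a security notice.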
def _get_column_lengths(rows, labels):
lens = {k: len(labels[k]) for k in labels.keys()}
for r in range(0, len(rows)):
for k, v in rows[r].items():
if k in labels.keys():
l = len(str(v))
if l > lens[k]:
lens[k] = l
return lens
def _display_table(rows, show_notices=False, show_all_packages=False):
lens = _get_column_lengths(rows, DISPLAY_TABLE_LABELS)
columns = ['package', 'installed', 'latest', 'versions', ]
if show_notices:
columns.append('notices')
row_format = " | ".join(["{:<%s}" % lens[column]
for column in columns]) + "\n"
labels = row_format.format(
*(DISPLAY_TABLE_LABELS[column] for column in columns))
sys.stdout.write("-" * len(labels) + "\n")
sys.stdout.write(labels)
sys.stdout.write("-" * len(labels) + "\n")
for row in rows:
row['versions'] = row['versions'] if 'versions' in row else ''
row['notices'] = row['notices'] if 'notices' in row else ''
row['latest'] = row['latest'] if 'latest' in row and row['latest'] is not None else 'unknown'
if show_all_packages or len(row['versions']) > 0:
sys.stdout.write(row_format.format(
*(row[column] for column in columns)))
sys.stdout.write('\n')
def _display_progress(total, i=0):
percent = ("{0:.1f}").format(100 * (i / float(total)))
sys.stdout.write('\rFetching package information... %s%%\r' % percent)
if i < total:
sys.stdout.write('\rFetching package information... %s%%' % percent)
else:
sys.stdout.write('\r%s\r' % (' ' * 100))
|
the-stack_106_23996 | import torch
import random
import numpy as np
from tqdm import tqdm
from scipy.signal import windows
from torch.utils.data import DataLoader
# Custom packages
import net
import data
import utils
import loss
def getFreqWin():
"""
Window used for weighing the Fourier amplitude spectrum.
"""
win = 100*np.array([
0.01001502, 0.02186158, 0.02468514, 0.02473119, 0.02344306,
0.02420558, 0.02614269, 0.02733992, 0.027928 , 0.02808134,
0.02791206, 0.02747797, 0.02683388, 0.02604171, 0.0251617 ,
0.02424665, 0.02334555, 0.02249787, 0.02173223, 0.02106286,
0.02048341, 0.01998594, 0.01956418, 0.01921331, 0.01892948,
0.0187096 , 0.01855168, 0.01845486, 0.01841943, 0.01844628,
0.01852913, 0.01865568, 0.0188135 , 0.01898964, 0.01917029,
0.01934057, 0.01948487, 0.01959483, 0.01967077, 0.01971386,
0.01972565, 0.019708 , 0.01966303, 0.01959306, 0.01950055,
0.01938807, 0.0192582 , 0.01911351, 0.01895654, 0.01878973,
0.01861543, 0.01843586, 0.01825311, 0.01806913, 0.01788572,
0.01770456, 0.01752718, 0.017355 , 0.01718931, 0.01703132,
0.01688213, 0.01674279, 0.01661427, 0.01649752, 0.01639344
])
return win
def trainingLoop(model, dataloader, loss_func, learning_rate, n_epochs, device='cpu', desc='Default'):
"""
The neural network training loop. This trains the autoencoder to compress the tracks.
Args:
model (nn.Module): The neural network description.
dataloader (torch.Dataloader): The custom pytorch dataloader.
loss_func (nn.Module): The loss function.
learning_rate (float): Learning rate.
n_epochs (int): Number of epochs.
device (str, optional): What device does the computations. Defaults to 'cpu'.
desc (str, optional): The name of the weights saved after each epoch. Defaults to 'Default'.
"""
model = model.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for epoch in range(n_epochs):
loss_total = 0
mse_total = 0
ce_total = 0
# Creates a neat progress bar
pbar = tqdm(total=len(dataloader), dynamic_ncols=True, desc=f'Epoch {epoch + 1}/{n_epochs}')
for i, batch in enumerate(dataloader):
# Resetting gradients
optimizer.zero_grad()
# Loads batch
batch = batch.to(device)
# Inputs batches into NN
outputs = model(batch)
# Calculates loss
loss, mse, ce = loss_func(outputs, batch)
# Backpropagation
loss.backward()
optimizer.step()
# Log losses for progress display
loss_total += loss.item()
mse_total += mse.item()
ce_total += ce.item()
# Only update tqdm sometimes to reduce cpu load
if (i + 1) % 50 == 0:
pbar.set_postfix({'Avg Loss':f'{loss_total/(i+1):.8f}' , 'Avg MSE': f'{mse_total/(i+1):.8f}', 'Avg CE': f'{ce_total/(i+1):.8f}'})
pbar.update(50)
# Save model weights
pbar.close()
torch.save(model.state_dict(), f'weights_{desc}.pth')
if __name__ == '__main__':
##########################
### Dataset Parameters ###
##########################
path = 'fma_small' # Path to fma_small dataset (larger might also work)
n_files = 2000 # Number of files to be included in the training data
duration = 1 # Second to include from track. Set to None for entire track
offset = 10 # Seconds to start loading file from
shuffle_tracks = True # Shuffle tracks
input_size = 2**7 # Framesize for dataloader
    # The dataloader "chops" each track into "frames" of this size. This means that this value determines how many samples are put into the network
overlap = 0 # Number of overlapping samples
# Determines if the dataloader should overlap the "frames"
data_win_type = 'boxcar' # Window type applied to samples
    # Determines if the dataloader should apply a window to each frame. Use boxcar (Rectangular) if no window is needed
norm_train_data = True # Normalise samples
# If true, makes sure that the L2-norm of each "frame" is 1
batch_size = 16 # Batch size
shuffle = True # Shuffle Batches
###############################
### Optimization Parameters ###
###############################
n_epochs = 20 # Number of epochs
learning_rate = 1e-7 # Learning rate
beta = 5e3 # The weight of the MSE.
# The higher the value, the higher the MSE is weighted when calculating loss
b = 8 # Bit depth
# 2^b Discrete values produced by the quantiser
q_nodes = 2**8 # Number of neurons in quantization layer
# Defines the bit-rate together with the bit-depth. We are sending q_nodes*b bits
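    # Illustrative arithmetic (added comment): with the defaults above, q_nodes = 2**8 = 256
    # and b = 8, so each frame of input_size = 2**7 = 128 samples is sent as
    # 256 * 8 = 2048 bits, i.e. 16 bits per input sample before any further coding.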
q_interval = (-1,1) # Quantization interval/range
prev_state = '' # Path to previous model parameters
# NOTE that the model must fit the weight, i.e. be the same as what generated the weights
########################
### Model Parameters ###
########################
# Defines the number of convolution blocks to use, as well as the number of kernels/channels to use for each block. (len(conv_features) = number of convolution blocks | conv_features[i] = number of kernels)
conv_features = (
input_size//4,
input_size,
input_size*4
)
time_win_type = 'hann' # Window applied to the MSE
# When calculating the loss, a window is applied to the "frame" before calculating the MSE. To deter high frequency noise, this should weight the edge samples higher. NOTE that this is manually inverted later in the code
kernel_size = 11 # Kernel size
############################
### Dependent Parameters ###
############################
# If a Nvidia GPU is detected, use this instead of the CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using {device}!')
# Finds the paths to all the MP3-files and crops the list
paths = utils.findPathsMP3(path)
if shuffle_tracks:
random.seed('plosive voltages') # Hmm...
random.shuffle(paths)
paths = paths[:n_files]
# Generates the needed windows.
data_win = windows.get_window(data_win_type, input_size)
time_win = windows.get_window(time_win_type, input_size, fftbins=False)
time_win = torch.from_numpy(0.005/(time_win + 0.005) + 0.5).to(device)
freq_win = torch.from_numpy(getFreqWin()).to(device)
# Dataset and Dataloader
dataset = data.fmaSmallDataset(paths, data_win, overlap=overlap, normalise=norm_train_data, duration=duration, offset=offset)
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle)
# Loss Function
loss_func = loss.MusicCompLoss(beta, time_win, freq_win)
# Define the name of the weight file save after each epoch
desc = f'Nodes_{q_nodes}__Depth_{b}'
print(f'Now training model with q_nodes={q_nodes} and b={b}')
# Model
model = net.NeuralNetConv(input_size, b, q_interval, q_nodes, kernel_size, conv_features)
# Loads the weights of a previous training
if prev_state:
model.load_state_dict(torch.load(prev_state))
model.eval()
# Do the training
trainingLoop(model, train_loader, loss_func, learning_rate, n_epochs, device=device, desc=desc) |
the-stack_106_23997 | class Solution:
def maxNumber(self, nums1, nums2, k):
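        # Outline of the approach (comments added for clarity, logic unchanged): for every
        # split m, build the largest length-m subsequence of nums1 and the largest
        # length-(k - m) subsequence of nums2 (makeArr), greedily merge the two (merge),
        # and keep the lexicographically largest merged result.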
def merge(arr1, arr2):
res, i, j = [], 0, 0
while i < len(arr1) and j < len(arr2):
if arr1[i:] >= arr2[j:]:
res.append(arr1[i])
i += 1
else:
res.append(arr2[j])
j += 1
if i < len(arr1): res += arr1[i:]
elif j < len(arr2): res += arr2[j:]
return res
def makeArr(arr, l):
i, res = 0, []
for r in range(l - 1, -1, -1):
num, i = max(arr[i:-r] or arr[i:])
i = -i + 1
res.append(num)
return res
nums1, nums2, choices = [(num, -i) for i, num in enumerate(nums1)], [(num, -i) for i, num in enumerate(nums2)], []
for m in range(k + 1):
if m > len(nums1) or k - m > len(nums2): continue
arr1, arr2 = makeArr(nums1, m), makeArr(nums2, k - m)
choices.append(merge(arr1, arr2))
return max(choices)
|
the-stack_106_24000 | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
#
# idUtil:
# id (uuid) related functions
#
import hashlib
import uuid
from aiohttp.web_exceptions import HTTPServiceUnavailable
from .. import hsds_logger as log
def getIdHash(id):
""" Return md5 prefix based on id value"""
m = hashlib.new('md5')
m.update(id.encode('utf8'))
hexdigest = m.hexdigest()
return hexdigest[:5]
def isSchema2Id(id):
""" return true if this is a v2 id """
# v1 ids are in the standard UUID format: 8-4-4-4-12
# v2 ids are in the non-standard: 8-8-4-6-6
parts = id.split('-')
if len(parts) != 6:
raise ValueError(f"Unexpected id formation for uuid: {id}")
if len(parts[2]) == 8:
return True
else:
return False
def getIdHexChars(id):
""" get the hex chars of the given id """
if id[0] == 'c':
# don't include chunk index
index = id.index('_')
parts = id[0:index].split('-')
else:
parts = id.split('-')
if len(parts) != 6:
raise ValueError(f"Unexpected id format for uuid: {id}")
return "".join(parts[1:])
def hexRot(ch):
""" rotate hex character by 8 """
return format((int(ch, base=16) + 8) % 16, 'x')
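# Examples (added comment): hexRot rotates a hex digit by 8, e.g. hexRot('0') == '8',
# hexRot('7') == 'f', hexRot('8') == '0' and hexRot('f') == '7'. Applying it twice
# returns the original digit, which is what the root-id checks below rely on.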
def isRootObjId(id):
""" returns true if this is a root id (only for v2 schema) """
if not isSchema2Id(id):
raise ValueError("isRootObjId can only be used with v2 ids")
validateUuid(id) # will throw ValueError exception if not a objid
if id[0] != 'g':
return False # not a group
token = getIdHexChars(id)
# root ids will have last 16 chars rotated version of the first 16
is_root = True
for i in range(16):
if token[i] != hexRot(token[i+16]):
is_root = False
break
return is_root
def getRootObjId(id):
""" returns root id for this objid if this is a root id (only for v2 schema) """
if isRootObjId(id):
return id # this is the root id
token = list(getIdHexChars(id))
# root ids will have last 16 chars rotated version of the first 16
for i in range(16):
token[i+16] = hexRot(token[i])
token = "".join(token)
root_id = 'g-' + token[0:8] + '-' + token[8:16] + '-' + token[16:20] + '-' + token[20:26] + '-' + token[26:32]
return root_id
def createObjId(obj_type, rootid=None):
if obj_type not in ('groups', 'datasets', 'datatypes', 'chunks', "roots"):
raise ValueError("unexpected obj_type")
prefix = None
if obj_type == 'datatypes':
prefix = 't' # don't collide with datasets
elif obj_type == "roots":
prefix = 'g' # root obj is a group
else:
prefix = obj_type[0]
if (not rootid and obj_type != "roots") or (rootid and not isSchema2Id(rootid)):
# v1 schema
objid = prefix + '-' + str(uuid.uuid1())
else:
# schema v2
salt = uuid.uuid4().hex
# take a hash to randomize the uuid
token = list(hashlib.sha256(salt.encode()).hexdigest())
if rootid:
# replace first 16 chars of token with first 16 chars of rootid
root_hex = getIdHexChars(rootid)
token[0:16] = root_hex[0:16]
else:
# obj_type == "roots"
# use only 16 chars, but make it look a 32 char id
for i in range(16):
token[16+i] = hexRot(token[i])
# format as a string
token = "".join(token)
objid = prefix + '-' + token[0:8] + '-' + token[8:16] + '-' + token[16:20] + '-' + token[20:26] + '-' + token[26:32]
return objid
def getS3Key(id):
""" Return s3 key for given id.
For schema v1:
A md5 prefix is added to the front of the returned key to better
distribute S3 objects.
For schema v2:
The id is converted to the pattern: "db/{rootid[0:16]}" for rootids and
"db/id[0:16]/{prefix}/id[16-32]" for other ids
Chunk ids have the chunk index added after the slash: "db/id[0:16]/d/id[16:32]/x_y_z
    For domain ids, return a key with the .domain suffix and no preceding slash
"""
if id.find('/') > 0:
# a domain id
domain_suffix = ".domain.json"
index = id.find('/') + 1
key = id[index:]
if not key.endswith(domain_suffix):
if key[-1] != '/':
key += '/'
key += domain_suffix
else:
if isSchema2Id(id):
# schema v2 id
hexid = getIdHexChars(id)
prefix = id[0] # one of g, d, t, c
if prefix not in ('g', 'd', 't', 'c'):
raise ValueError(f"Unexpected id: {id}")
if isRootObjId(id):
key = f"db/{hexid[0:8]}-{hexid[8:16]}"
else:
partition = ""
if prefix == 'c':
s3col = 'd' # so that chunks will show up under their dataset
n = id.find('-')
if n > 1:
# extract the partition index if present
partition = 'p' + id[1:n]
else:
s3col = prefix
key = f"db/{hexid[0:8]}-{hexid[8:16]}/{s3col}/{hexid[16:20]}-{hexid[20:26]}-{hexid[26:32]}"
if prefix == 'c':
if partition:
key += '/'
key += partition
# add the chunk coordinate
index = id.index('_') # will raise ValueError if not found
coord = id[index+1:]
key += '/'
key += coord
elif prefix == 'g':
# add key suffix for group
key += "/.group.json"
elif prefix == 'd':
# add key suffix for dataset
key += "/.dataset.json"
else:
# add key suffix for datatype
key += "/.datatype.json"
else:
# v1 id
# schema v1 id
idhash = getIdHash(id)
key = f"{idhash}-{id}"
return key
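# Illustrative key layouts (added comment; the hex digits are hypothetical and depend on
# the actual uuid):
#   v1 id      'g-<uuid>'                 -> '<5-char md5 prefix>-g-<uuid>'
#   v2 dataset 'd-...'                    -> 'db/<hex8>-<hex8>/d/<hex4>-<hex6>-<hex6>/.dataset.json'
#   v2 chunk   'c-..._2_3'                -> 'db/<hex8>-<hex8>/d/<hex4>-<hex6>-<hex6>/2_3'
#   domain     'mybucket/home/user/a.h5'  -> 'home/user/a.h5/.domain.json'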
def getObjId(s3key):
""" Return object id given valid s3key """
if len(s3key) >= 44 and s3key[0:5].isalnum() and s3key[5] == '-' and s3key[6] in ('g', 'd', 'c', 't'):
# v1 obj keys
objid = s3key[6:]
elif s3key.endswith("/.domain.json"):
objid = '/' + s3key[:-(len("/.domain.json"))]
elif s3key.startswith("db/"):
# schema v2 object key
parts = s3key.split('/')
chunk_coord = "" # used only for chunk ids
partition = "" # likewise
token = []
for ch in parts[1]:
if ch != '-':
token.append(ch)
if len(parts) == 3:
# root id
# last part should be ".group.json"
if parts[2] != ".group.json":
raise ValueError(f"unexpected S3Key: {s3key}")
# add 16 more chars using rotated version of first 16
for i in range(16):
token.append(hexRot(token[i]))
prefix = 'g'
elif len(parts) == 5:
# group, dataset, or datatype or chunk
for ch in parts[3]:
if ch != '-':
token.append(ch)
if parts[2] == 'g' and parts[4] == ".group.json":
prefix = 'g' # group json
elif parts[2] == 't' and parts[4] == ".datatype.json":
prefix = 't' # datatype json
elif parts[2] == 'd':
if parts[4] == ".dataset.json":
prefix = 'd' # dataset json
else:
# chunk object
prefix = 'c'
chunk_coord = "_" + parts[4]
else:
raise ValueError(f"unexpected S3Key: {s3key}")
elif len(parts) == 6:
# chunk key with partitioning
for ch in parts[3]:
if ch != '-':
token.append(ch)
if parts[2][0] != 'd':
raise ValueError(f"unexpected S3Key: {s3key}")
prefix = 'c'
partition = parts[4]
if partition[0] != 'p':
raise ValueError(f"unexpected S3Key: {s3key}")
partition = partition[1:] # strip off the p
chunk_coord = "_" + parts[5]
else:
raise ValueError(f"unexpected S3Key: {s3key}")
token = "".join(token)
objid = prefix + partition + '-' + token[0:8] + '-' + token[8:16] + '-' + token[16:20] + '-' + token[20:26] + '-' + token[26:32] + chunk_coord
else:
raise ValueError(f"unexpected S3Key: {s3key}")
return objid
def isS3ObjKey(s3key):
valid = False
try:
objid = getObjId(s3key)
if objid:
valid = True
except KeyError:
pass # ignore
except ValueError:
pass # ignore
return valid
def createNodeId(prefix):
""" Create a random id used to identify nodes"""
node_uuid = str(uuid.uuid1())
idhash = getIdHash(node_uuid)
key = prefix + "-" + idhash
return key
def getCollectionForId(obj_id):
""" return groups/datasets/datatypes based on id """
if not isinstance(obj_id, str):
raise ValueError("invalid object id")
collection = None
if obj_id.startswith("g-"):
collection = "groups"
elif obj_id.startswith("d-"):
collection = "datasets"
elif obj_id.startswith("t-"):
collection = "datatypes"
else:
raise ValueError("not a collection id")
return collection
def validateUuid(id, obj_class=None):
if not isinstance(id, str):
raise ValueError("Expected string type")
if len(id) < 38:
# id should be prefix (e.g. "g-") and uuid value
raise ValueError("Unexpected id length")
if id[0] not in ('g', 'd', 't', 'c'):
raise ValueError("Unexpected prefix")
if id[0] != 'c' and id[1] != '-':
# chunk ids may have a partition index following the c
raise ValueError("Unexpected prefix")
if obj_class is not None:
obj_class = obj_class.lower()
prefix = obj_class[0]
if obj_class.startswith("datatype"):
prefix = 't'
if id[0] != prefix:
raise ValueError("Unexpected prefix for class: " + obj_class)
if id[0] == 'c':
# trim the type char and any partition id
n = id.find('-')
if n == -1:
raise ValueError("Invalid chunk id")
# trim the chunk index for chunk ids
m = id.find('_')
if m == -1:
raise ValueError("Invalid chunk id")
id = "c-" + id[(n+1):m]
if len(id) != 38:
# id should be 36 now
raise ValueError("Unexpected id length")
for ch in id:
if ch.isalnum():
continue
if ch == '-':
continue
raise ValueError(f"Unexpected character in uuid: {ch}")
def isValidUuid(id, obj_class=None):
try:
validateUuid(id, obj_class)
return True
except ValueError:
return False
def isValidChunkId(id):
if not isValidUuid(id):
return False
if id[0] != 'c':
return False
return True
def getClassForObjId(id):
""" return domains/chunks/groups/datasets/datatypes based on id """
if not isinstance(id, str):
raise ValueError("Expected string type")
if len(id) == 0:
raise ValueError("Empty string")
if id[0] == '/':
return "domains"
if isValidChunkId(id):
return "chunks"
else:
return getCollectionForId(id)
def isObjId(id):
""" return true if uuid or domain """
if not isinstance(id, str) or len(id) == 0:
return False
if id.find('/') > 0:
return True # domain id is any string in the form <bucket_name>/<domain_path>
return isValidUuid(id)
def getUuidFromId(id):
""" strip off the type prefix ('g-' or 'd-', or 't-')
and return the uuid part """
return id[2:]
def getObjPartition(id, count):
""" Get the id of the dn node that should be handling the given obj id
"""
hash_code = getIdHash(id)
hash_value = int(hash_code, 16)
number = hash_value % count
log.debug(f"ID {id} resolved to data node {number}")
return number
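# Illustrative example (added comment; the hash value is hypothetical): with count = 4
# data nodes and an id whose 5-char md5 prefix is '0539a', the partition is
# int('0539a', 16) % 4 == 21402 % 4 == 2, so data node 2 owns that object.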
def validateInPartition(app, obj_id):
log.debug(f'obj_id: {obj_id}, app[node_count]: {app["node_count"]}, node_number: {app["node_number"]}')
if getObjPartition(obj_id, app['node_count']) != app['node_number']:
# The request shouldn't have come to this node'
msg = "wrong node for 'id':{obj_id}, expected node {app['node_number']} got {getObjPartition(obj_id, app['node_count'])}"
log.error(msg)
raise KeyError(msg)
def getDataNodeUrl(app, obj_id):
""" Return host/port for datanode for given obj_id.
Throw exception if service is not ready"""
dn_urls = app["dn_urls"]
node_count = app["node_count"]
node_state = app["node_state"]
if node_state!= "READY" or node_count <= 0 or node_count != len(dn_urls):
log.info(f"getDataNodeUrl returning 503 - node_state: {node_state} node_count: {node_count} number of dn_urls {len(dn_urls)}")
msg="Service not ready"
log.warn(msg)
raise HTTPServiceUnavailable()
dn_number = getObjPartition(obj_id, node_count)
url = dn_urls[dn_number]
log.debug(f"got dn_url: {url} for obj_id: {obj_id}")
return url
def getDataNodeUrls(app):
""" Return list of all urls to the set of datanodes """
dn_url_map = app["dn_urls"]
dn_urls = []
for id in dn_url_map:
dn_urls.append(dn_url_map[id])
return dn_urls
|
the-stack_106_24003 | from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
class AppTestSorting(BaseNumpyAppTest):
def test_argsort_dtypes(self):
from numpy import array, arange
assert array(2.0).argsort() == 0
nnp = self.non_native_prefix
for dtype in ['int', 'float', 'int16', 'float32', 'uint64',
nnp + 'i2', complex]:
a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
exp = list(a)
exp = sorted(range(len(exp)), key=exp.__getitem__)
c = a.copy()
res = a.argsort()
            assert (res == exp).all(), 'Failed sorting %r\na=%r\nres=%r\nexp=%r' % (dtype,a,res,exp)
assert (a == c).all() # not modified
a = arange(100, dtype=dtype)
assert (a.argsort() == a).all()
def test_argsort_ndim(self):
from numpy import array
a = array([[4, 2], [1, 3]])
assert (a.argsort() == [[1, 0], [0, 1]]).all()
a = array(range(10) + range(10) + range(10))
b = a.argsort()
assert (b[:3] == [0, 10, 20]).all()
#trigger timsort 'run' mode which calls arg_getitem_slice
a = array(range(100) + range(100) + range(100))
b = a.argsort()
assert (b[:3] == [0, 100, 200]).all()
a = array([[[]]]).reshape(3,4,0)
b = a.argsort()
assert b.size == 0
def test_argsort_random(self):
from numpy import array
from _random import Random
rnd = Random(1)
a = array([rnd.random() for i in range(512*2)]).reshape(512,2)
a.argsort()
def test_argsort_axis(self):
from numpy import array
a = array([])
for axis in [None, -1, 0]:
assert a.argsort(axis=axis).shape == (0,)
a = array([[4, 2], [1, 3]])
assert (a.argsort(axis=None) == [2, 1, 3, 0]).all()
assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all()
assert (a.argsort(axis=0) == [[1, 0], [0, 1]]).all()
assert (a.argsort(axis=1) == [[1, 0], [0, 1]]).all()
a = array([[3, 2, 1], [1, 2, 3]])
assert (a.argsort(axis=0) == [[1, 0, 0], [0, 1, 1]]).all()
assert (a.argsort(axis=1) == [[2, 1, 0], [0, 1, 2]]).all()
def test_sort_dtypes(self):
from numpy import array, arange
for dtype in ['int', 'float', 'int16', 'float32', 'uint64',
'i2', complex]:
a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
exp = sorted(list(a))
a.sort()
assert (a == exp).all(), 'Failed sorting %r\n%r\n%r' % (dtype, a, exp)
a = arange(100, dtype=dtype)
c = a.copy()
a.sort()
            assert (a == c).all(), 'Failed sorting %r\na=%r\nc=%r' % (dtype,a,c)
def test_sort_nonnative(self):
from numpy import array
nnp = self.non_native_prefix
for dtype in [ nnp + 'i2']:
a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
b = array([-1, 3, 3, 4, 6, 8, 100, 101, 256+20], dtype=dtype)
c = a.copy()
import sys
if '__pypy__' in sys.builtin_module_names:
exc = raises(NotImplementedError, a.sort)
assert exc.value[0].find('supported') >= 0
#assert (a == b).all(), \
# 'a,orig,dtype %r,%r,%r' % (a,c,dtype)
# tests from numpy/tests/test_multiarray.py
def test_sort_corner_cases(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
from numpy import array, zeros, arange
from math import isnan
nan = float('nan')
a = array([nan, 1, 0])
b = a.copy()
b.sort()
assert [isnan(bb) for bb in b] == [isnan(aa) for aa in a[::-1]]
assert (b[:2] == a[::-1][:2]).all()
b = a.argsort()
assert (b == [2, 1, 0]).all()
# check complex
a = zeros(9, dtype='complex128')
a.real += [nan, nan, nan, 1, 0, 1, 1, 0, 0]
a.imag += [nan, 1, 0, nan, nan, 1, 0, 1, 0]
b = a.copy()
b.sort()
assert [isnan(bb) for bb in b] == [isnan(aa) for aa in a[::-1]]
assert (b[:4] == a[::-1][:4]).all()
b = a.argsort()
assert (b == [8, 7, 6, 5, 4, 3, 2, 1, 0]).all()
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
# test complex sorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert (c == ai).all(), msg
c = bi.copy();
c.sort(kind=kind)
assert (c == ai).all(), msg
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert (c == ai).all(), msg
c = bi.copy();
c.sort(kind=kind)
assert (c == ai).all(), msg
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = array([[3, 2], [1, 0]])
b = array([[1, 0], [3, 2]])
c = array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert (d == b).all(), "test sort with axis=0"
d = a.copy()
d.sort(axis=1)
assert (d == c).all(), "test sort with axis=1"
d = a.copy()
d.sort()
assert (d == c).all(), "test sort with default axis"
def test_sort_corner_cases_string_records(self):
skip('not implemented yet')
from numpy import array, dtype
# test string sorts.
s = 'aaaaaaaa'
a = array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
# test record array sorts.
dt =dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
def test_sort_unicode(self):
from numpy import array
# test unicode sorts.
s = 'aaaaaaaa'
try:
a = array([s + chr(i) for i in range(101)], dtype=unicode)
b = a[::-1].copy()
except:
skip('unicode type not supported yet')
for kind in ['q', 'm', 'h'] :
msg = "unicode sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
def test_sort_objects(self):
# test object array sorts.
skip('object type not supported yet')
from numpy import empty
try:
a = empty((101,), dtype=object)
except:
skip('object type not supported yet')
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
def test_sort_datetime(self):
from numpy import arange
# test datetime64 sorts.
try:
a = arange(0, 101, dtype='datetime64[D]')
except:
skip('datetime type not supported yet')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
# test timedelta64 sorts.
a = arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
c = b.copy();
c.sort(kind=kind)
assert (c == a).all(), msg
def test_sort_order(self):
from numpy import array, zeros
from sys import byteorder
# Test sorting an array with fields
skip('not implemented yet')
x1 = array([21, 32, 14])
x2 = array(['my', 'first', 'name'])
x3=array([3.1, 4.5, 6.2])
r=zeros(3, dtype=[('id','i'),('word','S5'),('number','f')])
r['id'] = x1
r['word'] = x2
r['number'] = x3
r.sort(order=['id'])
assert (r['id'] == [14, 21, 32]).all()
assert (r['word'] == ['name', 'my', 'first']).all()
assert max(abs(r['number'] - [6.2, 3.1, 4.5])) < 1e-6
r.sort(order=['word'])
assert (r['id'] == [32, 21, 14]).all()
assert (r['word'] == ['first', 'my', 'name']).all()
assert max(abs(r['number'] - [4.5, 3.1, 6.2])) < 1e-6
r.sort(order=['number'])
assert (r['id'] == [21, 32, 14]).all()
assert (r['word'] == ['my', 'first', 'name']).all()
assert max(abs(r['number'] - [3.1, 4.5, 6.2])) < 1e-6
if byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', 'S5'), ('col2', strtype)]
r = array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype= mydtype)
r.sort(order='col2')
assert (r['col2'] == [1, 3, 255, 258]).all()
assert (r == array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype)).all()
# tests from numpy/core/tests/test_regression.py
def test_sort_bigendian(self):
from numpy import array, dtype
a = array(range(11), dtype='float64')
c = a.astype(dtype('<f8'))
c.sort()
assert max(abs(a-c)) < 1e-32
def test_string_argsort_with_zeros(self):
import numpy as np
import sys
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
if '__pypy__' in sys.builtin_module_names:
exc = raises(NotImplementedError, "x.argsort(kind='m')")
assert 'non-numeric types' in exc.value.message
exc = raises(NotImplementedError, "x.argsort(kind='q')")
assert 'non-numeric types' in exc.value.message
else:
assert (x.argsort(kind='m') == np.array([1, 0])).all()
assert (x.argsort(kind='q') == np.array([1, 0])).all()
def test_string_sort_with_zeros(self):
import numpy as np
import sys
x = np.fromstring("\x00\x02\x00\x01", dtype="S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="S2")
if '__pypy__' in sys.builtin_module_names:
exc = raises(NotImplementedError, "x.sort(kind='q')")
assert 'non-numeric types' in exc.value.message
else:
x.sort(kind='q')
assert (x == y).all()
def test_string_mergesort(self):
import numpy as np
import sys
x = np.array(['a'] * 32)
if '__pypy__' in sys.builtin_module_names:
exc = raises(NotImplementedError, "x.argsort(kind='m')")
assert 'non-numeric types' in exc.value.message
else:
assert (x.argsort(kind='m') == np.arange(32)).all()
def test_searchsort(self):
import numpy as np
a = np.array(2)
raises(ValueError, a.searchsorted, 3)
a = np.arange(1, 6)
ret = a.searchsorted(3)
assert ret == 2
assert isinstance(ret, np.generic)
ret = a.searchsorted(np.array(3))
assert ret == 2
assert isinstance(ret, np.generic)
ret = a.searchsorted(np.array([]))
assert isinstance(ret, np.ndarray)
assert ret.shape == (0,)
ret = a.searchsorted(np.array([3]))
assert ret == 2
assert isinstance(ret, np.ndarray)
ret = a.searchsorted(np.array([[2, 3]]))
assert (ret == [1, 2]).all()
assert ret.shape == (1, 2)
ret = a.searchsorted(3, side='right')
assert ret == 3
assert isinstance(ret, np.generic)
assert a.searchsorted(3.1) == 3
assert a.searchsorted(3.9) == 3
exc = raises(ValueError, a.searchsorted, 3, side=None)
assert str(exc.value) == "expected nonempty string for keyword 'side'"
exc = raises(ValueError, a.searchsorted, 3, side='')
assert str(exc.value) == "expected nonempty string for keyword 'side'"
exc = raises(ValueError, a.searchsorted, 3, side=2)
assert str(exc.value) == "expected nonempty string for keyword 'side'"
ret = a.searchsorted([-10, 10, 2, 3])
assert (ret == [0, 5, 1, 2]).all()
import sys
if '__pypy__' in sys.builtin_module_names:
raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))")
|
the-stack_106_24004 | import tensorflow as tf
from garage.tf.regressors import Regressor2
from tests.fixtures.models import SimpleMLPModel
class SimpleMLPRegressor(Regressor2):
"""Simple GaussianMLPRegressor for testing."""
def __init__(self, input_shape, output_dim, name, *args, **kwargs):
super().__init__(input_shape, output_dim, name)
self.model = SimpleMLPModel(
output_dim=self._output_dim, name='SimpleMLPModel')
self._initialize()
def _initialize(self):
input_ph = tf.compat.v1.placeholder(
tf.float32, shape=(None, ) + self._input_shape)
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self.model.build(input_ph)
self.ys = None
def fit(self, xs, ys):
self.ys = ys
def predict(self, xs):
if self.ys is None:
outputs = tf.compat.v1.get_default_session().run(
self.model.networks['default'].outputs,
feed_dict={self.model.networks['default'].input: xs})
self.ys = outputs
return self.ys
def get_params_internal(self, *args, **kwargs):
return self._variable_scope.trainable_variables()
def __setstate__(self, state):
"""Object.__setstate__."""
super().__setstate__(state)
self._initialize()
|
the-stack_106_24005 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# Developed by Lutkin Wang
# Check prototype in
# <codeblock props="ios mac" outputclass="language-objectivec">- (void)receiveMetadata:(NSData * _Nonnull)data
# fromUser:(NSInteger)uid atTimestamp:(NSTimeInterval)timestamp;
# </codeblock>
import os
import re
log_name = "log_ios.txt"
def removeComments(string):
string = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "",
string) # remove all occurrences streamed comments (/*COMMENT */) from string
string = re.sub(re.compile("//.*?\n"), "",
string) # remove all occurrence single-line comments (//COMMENT\n ) from string
return string
def write_log(text):
with open(log_name, encoding='utf8', mode='a') as f:
f.write(text + "\n")
def read_ditamap(filename):
with open(filename, encoding='utf8', mode='r') as f:
text = f.read()
return text
def main():
# Code location
code_location = "C:\\Users\\WL\\Documents\\rte_sdk\\interface\\objc"
# DITA location
dita_location = "C:\\Users\\WL\\Documents\\GitHub\\doc_source\\dita\\RTC\\API"
# dita_location = "C:\\Users\\WL\\Documents\\GitHub\\doc_source\\en-US\\dita\\RTC\\API"
# DITA map location
dita_map_location = "C:\\Users\\WL\\Documents\\GitHub\\doc_source\\dita\\RTC\\config\\keys-rtc-ng-api-oc.ditamap."
decomment_code_location = "C:\\Users\\WL\\Documents\\nocomment"
# A list of DITA files
dita_file_list = []
# A list of DITA protos
dita_proto_list = []
# A list of code files
code_file_list = []
# A list of proto files
code_proto_list = []
ditamap_content = read_ditamap(dita_map_location)
# Handle the DITA files
for file in os.scandir(dita_location):
        if (file.path.endswith(".dita")) and not file.path.startswith(dita_location + "\\enum_") and not file.path.startswith(dita_location + "\\rtc_") and file.is_file() and os.path.basename(file) in ditamap_content:
print(file.path)
dita_file_list.append(file.path)
with open(file.path, encoding='utf8') as f:
content = f.read()
# Use substring methods to get the proto from DITA
# Here, we assume that the DITA file contains a single codeblock for each programming language
after_codeblock_start_tag = re.split('<codeblock props="ios mac" outputclass="language-objectivec">',
content)
try:
before_codeblock_end_tag = re.split('</codeblock>', after_codeblock_start_tag[1])
proto_text = before_codeblock_end_tag[0]
except IndexError:
proto_text = "Error: No prototype"
            proto_text = proto_text.replace("&amp;", "&")
            proto_text = proto_text.replace("&lt;", "<")
            proto_text = proto_text.replace("&gt;", ">")
print(proto_text)
dita_proto_list.append(proto_text)
dictionary = dict(zip(dita_file_list, dita_proto_list))
# Handle the interface files
# Decomment all oc files
for root, dirs, files in os.walk(code_location):
for file in files:
if file.endswith(".h"):
with open(os.path.join(root, file), encoding='utf8', mode='r') as f:
text = removeComments(f.read())
with open(decomment_code_location + "/" + "concatenated.h", encoding='utf8', mode='a') as f1:
f1.write(text)
with open(decomment_code_location + "/" + "concatenated.h", encoding='utf8', mode='r') as f:
content = f.read()
    content1 = content.replace("&amp;", "&")
    content2 = content1.replace("&lt;", "<")
    content3 = content2.replace("&gt;", ">")
content4 = content3.replace(" ", "")
content5 = content4.replace("\n", "")
open(log_name, "w").close()
i = 1
write_log("The DITAMAP used is " + dita_map_location + "\n")
for file, code in dictionary.items():
        code1 = code.replace("&amp;", "&")
        code2 = code1.replace("&lt;", "<")
        code3 = code2.replace("&gt;", ">")
code4 = code3.replace(" ", "")
code5 = code4.replace("\n", "")
if content5.find(code5) == -1:
write_log("No. " + str(i) + " Mismatch found")
i = i + 1
write_log("\n")
write_log("-------------------------------------------------------------------------------")
write_log("-------------------------------------------------------------------------------")
write_log("For the DITA file: " + file)
write_log("This prototype in DITA cannot be located in the source code: \n " + code + "\n")
write_log("-------------------------------------------------------------------------------")
write_log("-------------------------------------------------------------------------------")
write_log("\n")
# Clean folder
for root, dirs, files in os.walk(decomment_code_location):
for file in files:
os.remove(os.path.join(root, file))
if __name__ == '__main__':
main() |
the-stack_106_24006 | """Sensor platform for FireServiceRota integration."""
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType
from .const import DATA_CLIENT, DOMAIN as FIRESERVICEROTA_DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up FireServiceRota sensor based on a config entry."""
client = hass.data[FIRESERVICEROTA_DOMAIN][entry.entry_id][DATA_CLIENT]
async_add_entities([IncidentsSensor(client)])
class IncidentsSensor(RestoreEntity, SensorEntity):
"""Representation of FireServiceRota incidents sensor."""
def __init__(self, client):
"""Initialize."""
self._client = client
self._entry_id = self._client.entry_id
self._unique_id = f"{self._client.unique_id}_Incidents"
self._state = None
self._state_attributes = {}
@property
def name(self) -> str:
"""Return the name of the sensor."""
return "Incidents"
@property
def icon(self) -> str:
"""Return the icon to use in the frontend."""
if (
"prio" in self._state_attributes
and self._state_attributes["prio"][0] == "a"
):
return "mdi:ambulance"
return "mdi:fire-truck"
@property
def state(self) -> str:
"""Return the state of the sensor."""
return self._state
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor."""
return self._unique_id
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@property
def extra_state_attributes(self) -> object:
"""Return available attributes for sensor."""
attr = {}
data = self._state_attributes
if not data:
return attr
for value in (
"id",
"trigger",
"created_at",
"message_to_speech_url",
"prio",
"type",
"responder_mode",
"can_respond_until",
):
if data.get(value):
attr[value] = data[value]
if "address" not in data:
continue
for address_value in (
"latitude",
"longitude",
"address_type",
"formatted_address",
):
if address_value in data["address"]:
attr[address_value] = data["address"][address_value]
return attr
async def async_added_to_hass(self) -> None:
"""Run when about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._state = state.state
self._state_attributes = state.attributes
if "id" in self._state_attributes:
self._client.incident_id = self._state_attributes["id"]
_LOGGER.debug("Restored entity 'Incidents' to: %s", self._state)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{FIRESERVICEROTA_DOMAIN}_{self._entry_id}_update",
self.client_update,
)
)
@callback
def client_update(self) -> None:
"""Handle updated data from the data client."""
data = self._client.websocket.incident_data
if not data or "body" not in data:
return
self._state = data["body"]
self._state_attributes = data
if "id" in self._state_attributes:
self._client.incident_id = self._state_attributes["id"]
self.async_write_ha_state()
|
the-stack_106_24007 | #!/usr/bin/python
# Copyright (c) 2016 Simon van Heeringen <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Command line function 'maelstrom'"""
import os
from gimmemotifs.maelstrom import run_maelstrom
def maelstrom(args):
"""Run the maelstrom method."""
infile = args.inputfile
genome = args.genome
outdir = args.outdir
pfmfile = args.pfmfile
filter_redundant = args.filter_redundant
filter_cutoff = args.filter_cutoff
methods = args.methods
ncpus = args.ncpus
zscore = args.zscore
center = args.center
gc = args.gc
aggregation = args.aggregation
if not os.path.exists(infile):
raise ValueError("file {} does not exist".format(infile))
if methods:
methods = [x.strip() for x in methods.split(",")]
run_maelstrom(
infile,
genome,
outdir,
pfmfile,
filter_redundant=filter_redundant,
filter_cutoff=filter_cutoff,
methods=methods,
ncpus=ncpus,
zscore=zscore,
gc=gc,
center=center,
aggregation=aggregation,
)
|
the-stack_106_24009 | from manim import *
# French Cursive LaTeX font example from http://jf.burnol.free.fr/showcase.html
# Example 1 Manually creating a Template
TemplateForFrenchCursive = TexTemplate(
preamble=r"""
\usepackage[english]{babel}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage[T1]{fontenc}
\usepackage[default]{frcursive}
\usepackage[eulergreek,noplusnominus,noequal,nohbar,%
nolessnomore,noasterisk]{mathastext}
"""
)
def FrenchCursive(*tex_strings, **kwargs):
return Tex(*tex_strings, tex_template=TemplateForFrenchCursive, **kwargs)
class TexFontTemplateManual(Scene):
"""An example scene that uses a manually defined TexTemplate() object to create
LaTeX output in French Cursive font"""
def construct(self):
self.add(Tex("Tex Font Example").to_edge(UL))
self.play(ShowCreation(FrenchCursive("$f: A \\longrightarrow B$").shift(UP)))
self.play(
ShowCreation(FrenchCursive("Behold! We can write math in French Cursive"))
)
self.wait(1)
self.play(
ShowCreation(
Tex(
"See more font templates at \\\\ http://jf.burnol.free.fr/showcase.html"
).shift(2 * DOWN)
)
)
self.wait(2)
# Example 2, using a Template from the collection
class TexFontTemplateLibrary(Scene):
"""An example scene that uses TexTemplate objects from the TexFontTemplates collection
to create sample LaTeX output in every font that will compile on the local system.
Please Note:
Many of the in the TexFontTemplates collection require that specific fonts
are installed on your local machine.
For example, choosing the template TexFontTemplates.comic_sans will
not compile if the Comic Sans Micrososft font is not installed.
This scene will only render those Templates that do not cause a TeX
compilation error on your system. Furthermore, some of the ones that do render,
may still render incorrectly. This is beyond the scope of manim.
Feel free to experiment.
"""
def construct(self):
def write_one_line(template):
x = Tex(template.description, tex_template=template).shift(UP)
self.play(ShowCreation(x))
self.wait(1)
self.play(FadeOut(x))
examples = [
TexFontTemplates.american_typewriter, # "American Typewriter"
TexFontTemplates.antykwa, # "Antykwa Półtawskiego (TX Fonts for Greek and math symbols)"
TexFontTemplates.apple_chancery, # "Apple Chancery"
TexFontTemplates.auriocus_kalligraphicus, # "Auriocus Kalligraphicus (Symbol Greek)"
TexFontTemplates.baskervald_adf_fourier, # "Baskervald ADF with Fourier"
TexFontTemplates.baskerville_it, # "Baskerville (Italic)"
TexFontTemplates.biolinum, # "Biolinum"
TexFontTemplates.brushscriptx, # "BrushScriptX-Italic (PX math and Greek)"
TexFontTemplates.chalkboard_se, # "Chalkboard SE"
TexFontTemplates.chalkduster, # "Chalkduster"
TexFontTemplates.comfortaa, # "Comfortaa"
TexFontTemplates.comic_sans, # "Comic Sans MS"
TexFontTemplates.droid_sans, # "Droid Sans"
TexFontTemplates.droid_sans_it, # "Droid Sans (Italic)"
TexFontTemplates.droid_serif, # "Droid Serif"
TexFontTemplates.droid_serif_px_it, # "Droid Serif (PX math symbols) (Italic)"
TexFontTemplates.ecf_augie, # "ECF Augie (Euler Greek)"
TexFontTemplates.ecf_jd, # "ECF JD (with TX fonts)"
TexFontTemplates.ecf_skeetch, # "ECF Skeetch (CM Greek)"
TexFontTemplates.ecf_tall_paul, # "ECF Tall Paul (with Symbol font)"
TexFontTemplates.ecf_webster, # "ECF Webster (with TX fonts)"
TexFontTemplates.electrum_adf, # "Electrum ADF (CM Greek)"
TexFontTemplates.epigrafica, # Epigrafica
TexFontTemplates.fourier_utopia, # "Fourier Utopia (Fourier upright Greek)"
TexFontTemplates.french_cursive, # "French Cursive (Euler Greek)"
TexFontTemplates.gfs_bodoni, # "GFS Bodoni"
TexFontTemplates.gfs_didot, # "GFS Didot (Italic)"
TexFontTemplates.gfs_neoHellenic, # "GFS NeoHellenic"
TexFontTemplates.gnu_freesans_tx, # "GNU FreeSerif (and TX fonts symbols)"
TexFontTemplates.gnu_freeserif_freesans, # "GNU FreeSerif and FreeSans"
TexFontTemplates.helvetica_fourier_it, # "Helvetica with Fourier (Italic)"
TexFontTemplates.latin_modern_tw_it, # "Latin Modern Typewriter Proportional (CM Greek) (Italic)"
TexFontTemplates.latin_modern_tw, # "Latin Modern Typewriter Proportional"
TexFontTemplates.libertine, # "Libertine"
TexFontTemplates.libris_adf_fourier, # "Libris ADF with Fourier"
TexFontTemplates.minion_pro_myriad_pro, # "Minion Pro and Myriad Pro (and TX fonts symbols)"
TexFontTemplates.minion_pro_tx, # "Minion Pro (and TX fonts symbols)"
TexFontTemplates.new_century_schoolbook, # "New Century Schoolbook (Symbol Greek)"
TexFontTemplates.new_century_schoolbook_px, # "New Century Schoolbook (Symbol Greek, PX math symbols)"
TexFontTemplates.noteworthy_light, # "Noteworthy Light"
TexFontTemplates.palatino, # "Palatino (Symbol Greek)"
TexFontTemplates.papyrus, # "Papyrus"
TexFontTemplates.romande_adf_fourier_it, # "Romande ADF with Fourier (Italic)"
TexFontTemplates.slitex, # "SliTeX (Euler Greek)"
TexFontTemplates.times_fourier_it, # "Times with Fourier (Italic)"
TexFontTemplates.urw_avant_garde, # "URW Avant Garde (Symbol Greek)"
TexFontTemplates.urw_zapf_chancery, # "URW Zapf Chancery (CM Greek)"
TexFontTemplates.venturis_adf_fourier_it, # "Venturis ADF with Fourier (Italic)"
TexFontTemplates.verdana_it, # "Verdana (Italic)"
TexFontTemplates.vollkorn_fourier_it, # "Vollkorn with Fourier (Italic)"
TexFontTemplates.vollkorn, # "Vollkorn (TX fonts for Greek and math symbols)"
TexFontTemplates.zapf_chancery, # "Zapf Chancery"
]
self.add(Tex("Tex Font Template Example").to_edge(UL))
for font in examples:
try:
write_one_line(font)
            except Exception:
print("FAILURE on ", font.description, " - skipping.")
self.play(
ShowCreation(
Tex(
"See more font templates at \\\\ http://jf.burnol.free.fr/showcase.html"
).shift(2 * DOWN)
)
)
self.wait(2)
|
the-stack_106_24013 | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 12 17:17:30 2021
@author: Rafael Arenhart
"""
import os
import time
import numpy as np
import tkinter as tk
import tkinter.filedialog as filedialog
import tkinter.messagebox as messagebox
from PIL import Image, ImageTk
import src.io as io
import src.operations as operations
SRC_FOLDER = os.path.dirname(os.path.realpath(__file__))
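# Maps a four-letter operation code to a tuple of
# (operation function, whether a 2-D preview is supported).
# The code is also used as the suffix appended to output file names
# and as the key for looking up the localized description string.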
OPERATIONS = {
'OTSU': (operations.otsu_threshold, True),
'WATE': (operations.watershed, True),
'AOSE': (operations.segregator, True),
'SHAP': (operations.shape_factor, False),
'AAPS': (operations.AA_pore_scale_permeability, False),
'SKEL': (operations.skeletonizer, True),
'SBPS': (operations.SB_pore_scale_permeability, False),
'LABL': (operations.labeling, False),
'ESTL': (operations.export_stl, False),
'RESC': (operations.rescale, False),
'MCAV': (operations.marching_cubes_area_and_volume, False),
'FFSO': (operations.formation_factor_solver, False),
'BKDI': (operations.breakthrough_diameter, False),
    'FMCH': (operations.full_morphology_characterization, False)
}
class Interface():
def __init__(self):
self.root = tk.Tk()
self.root.title('Heterogeneous Materials Analyzer')
self.operations_dictionary = {}
self.strings = {}
self.load_config()
self.lang_file = 'hma_' + self.config['language'] + '.lng'
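        # Language files (e.g. 'hma_en.lng', assumed name) use a simple
        # "KEY : value" line format, mapping string keys such as
        # SELECT_OPERATION to the localized text shown in the interface.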
with open(os.path.join(SRC_FOLDER, self.lang_file), mode = 'r') as file:
for line in file:
key, value = line.split(':')
key = key.strip()
value = value.strip()
self.strings[key] = value
for name, properties in OPERATIONS.items():
self.operations_dictionary[self.get_string(name)] = {
'function': properties[0],
'preview': properties[1],
'suffix': "_" + name}
self.selected_operation = tk.StringVar(self.root)
self.selected_operation.set(self.get_string('SELECT_OPERATION'))
self.selected_operation.trace(
'w', lambda w, x, y: self.update_op_description())
self.operations_menu = tk.OptionMenu(self.root,
self.selected_operation, *tuple(self.operations_dictionary.keys()))
self.operations_menu.config(width=50)
self.operations_menu.pack(side = tk.TOP)
self.op_description = tk.Message(self.root , width = 300)
self.op_description.pack(side = tk.TOP)
self.frm_main_buttons = tk.Frame(self.root)
self.frm_main_buttons.pack(side=tk.TOP)
self.btn_select_main = tk.Button(self.frm_main_buttons,
text = self.get_string('MAIN_SELECT_BUTTON'),
command = self.select_image,
state = tk.DISABLED)
self.btn_select_main.pack(side = tk.LEFT, padx = 10)
self.btn_close_main = tk.Button(self.frm_main_buttons,
text = self.get_string('MAIN_CLOSE_BUTTON'),
command = self.root.destroy)
self.btn_close_main .pack(side= tk.LEFT)
self.lbl_extras = tk.Label(self.root, text = 'Extra functions')
self.lbl_extras.pack(side = tk.TOP)
self.frm_extra_buttons = tk.Frame(self.root)
self.frm_extra_buttons.pack(side = tk.TOP)
self.btn_convert_bmp = tk.Button(self.frm_extra_buttons,
text = 'Convert BMP to RAW',
command = self.convert_bmp_to_raw)
self.btn_convert_bmp.pack(side=tk.LEFT, padx = 5)
def load_config(self):
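        # Expected format of hma.cfg (assumed example): one "key: value"
        # pair per line, e.g.
        #     language: en
        # Only the 'language' key is read elsewhere in this class.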
self.config = {}
with open(os.path.join(SRC_FOLDER, 'hma.cfg'), mode='r') as file:
for line in file:
key, value = line.split(':')
key = key.strip()
value = value.strip()
self.config[key] = value
def update_op_description(self):
operation = self.selected_operation.get()
suffix = self.operations_dictionary[operation]['suffix']
description_string = self.get_string('DESCRIPTION' + suffix)
self.op_description.config(text = description_string)
self.btn_select_main.config(state = tk.ACTIVE)
def close_all(self):
self.top_preview.destroy()
self.root.destroy()
def select_image(self):
self.root.withdraw()
self.img_path = filedialog.askopenfilename()
self.img, self.img_config, self.config_order = io.load_raw(self.img_path)
self.top_preview = tk.Toplevel(self.root)
self.top_preview.title('Preview')
self.top_preview.protocol("WM_DELETE_WINDOW", self.close_all)
self.cnv_preview = tk.Canvas(self.top_preview, width=200, height=200)
self.cnv_preview.pack(side = tk.TOP)
self.msg_preview = tk.Message(self.top_preview, width = 120)
self.fill_text_preview(text_widget = self.msg_preview)
self.msg_preview.pack(side = tk.TOP)
self.dct_parameters = {}
self.frm_preview_parameters = tk.Frame(self.top_preview)
self.create_parameters_frame(self.frm_preview_parameters,
self.dct_parameters)
self.frm_preview_parameters.pack(side = tk.TOP)
self.frm_preview_buttons = tk.Frame(self.top_preview)
self.btn_preview_preview = tk.Button(self.frm_preview_buttons,
text = self.get_string('BTN_PREVIEW_PREVIEW'),
command = self.preview_preview)
if not self.operations_dictionary[self.selected_operation.get()]['preview']:
self.btn_preview_preview.config(state = tk.DISABLED)
self.btn_preview_preview.pack(side = tk.LEFT, padx = 10)
self.btn_preview_run = tk.Button(self.frm_preview_buttons,
text = self.get_string('BTN_PREVIEW_RUN'),
command = self.preview_run)
self.btn_preview_run.pack(side = tk.LEFT, padx = 10)
self.btn_preview_cancel = tk.Button(self.frm_preview_buttons,
text = self.get_string('BTN_PREVIEW_CANCEL'),
command = self.preview_cancel)
self.btn_preview_cancel.pack(side = tk.LEFT, padx = 10)
self.frm_preview_buttons.pack(side = tk.TOP)
self.preview_img = None
self.preview_vol = None
self.create_preview_images()
def preview_cancel(self):
self.top_preview.withdraw()
self.root.iconify()
def preview_preview(self):
op = self.selected_operation.get()
op_suffix = self.operations_dictionary[op]['suffix']
if op_suffix == '_OTSU':
pre_im = operations.otsu_threshold(self.preview_vol[:,:,0])
elif op_suffix == '_WATE':
compactness = float(self.dct_parameters['compactness'].get())
pre_im = operations.watershed(self.preview_vol, compactness, two_d = True)
pre_im = pre_im[:,:,1]
elif op_suffix == '_AOSE':
threshold = float(self.dct_parameters['threshold'].get())
pre_im = operations.segregator(self.preview_vol, threshold, two_d = True)
pre_im = pre_im[:,:,1]
elif op_suffix == '_SKEL':
pre_im = operations.skeletonizer(self.preview_vol).sum(axis = 2)
self.preview_img = np.array(Image.fromarray(pre_im).resize((300,300)))
self.preview_img = (self.preview_img/np.max(self.preview_img))*254
        self.preview_img = self.preview_img.astype('uint8')
self.tk_img = ImageTk.PhotoImage(image=
Image.fromarray(self.preview_img))
self.cnv_preview.create_image((0,0), anchor="nw", image=self.tk_img)
def preview_run(self):
op = self.selected_operation.get()
op_suffix = self.operations_dictionary[op]['suffix']
if op_suffix == '_OTSU':
out_img = operations.otsu_threshold(self.img)
io.save_raw(self.img_path[:-4]+op_suffix+'.raw',
out_img,
self.img_config,
self.config_order)
elif op_suffix == '_WATE':
try:
compactness = float(self.dct_parameters['compactness'].get())
except ValueError:
messagebox.showinfo('Error', 'Entry is not a float')
return
out_img = operations.watershed(self.img, compactness)
io.save_raw(self.img_path[:-4]+op_suffix+'.raw',
out_img,
self.img_config,
self.config_order)
elif op_suffix == '_AOSE':
try:
threshold = float(self.dct_parameters['threshold'].get())
except ValueError:
messagebox.showinfo('Error', 'Entry is not a float')
return
out_img = operations.segregator(self.img, threshold)
io.save_raw(self.img_path[:-4]+op_suffix+'.raw',
out_img,
self.img_config,
self.config_order)
elif op_suffix == '_SHAP':
factors = []
for i in ('volume', 'surface', 'hidraulic radius',
'equivalent diameter', 'irregularity'):
if self.dct_parameters[i].get() == 1:
factors.append(i)
header, lines = operations.shape_factor(self.img, factors)
with open(self.img_path[:-4]+op_suffix+'.txt', mode = 'w') as file:
file.write(header+'\n')
file.write(lines)
elif op_suffix == '_SKEL':
out_img = operations.skeletonizer(self.img)
io.save_raw(self.img_path[:-4]+op_suffix+'.raw',
out_img,
self.img_config,
self.config_order)
elif op_suffix == '_LABL':
out_img = operations.labeling(self.img)
io.save_raw(self.img_path[:-4]+op_suffix+'.raw',
out_img,
self.img_config,
self.config_order)
        elif op_suffix == '_AAPS':
permeability = operations.AA_pore_scale_permeability(self.img)
messagebox.showinfo('Permeability result',
f'Calculated permeability is {permeability.solution}')
elif op_suffix == '_ESTL':
save_path = self.img_path[:-4]+'.stl'
try:
step_size = int(self.dct_parameters['step_size'].get())
except ValueError:
                messagebox.showinfo('Error', 'Entry is not an integer')
return
operations.export_stl(self.img, save_path, step_size)
elif op_suffix == '_RESC':
try:
factor = float(self.dct_parameters['factor'].get())
except ValueError:
messagebox.showinfo('Error', 'Entry is not a float')
return
out_img = operations.rescale(self.img, factor)
io.save_raw(self.img_path[:-4]+op_suffix+'.raw',
out_img,
self.img_config,
self.config_order)
elif op_suffix == '_MCAV':
start = time.perf_counter()
areas, volumes = operations.marching_cubes_area_and_volume(self.img)
with open(self.img_path[:-4]+op_suffix+'.txt', mode = 'w') as file:
file.write('Index\tArea\tVolume\n')
for i in range(1,len(areas)):
file.write(f'{i}\t{areas[i]}\t{volumes[i]}\n')
print(time.perf_counter() - start)
elif op_suffix == '_BKDI':
start = time.perf_counter()
step = float(self.dct_parameters['step'].get())
diameter = operations.breakthrough_diameter(self.img, step)
with open(self.img_path[:-4]+op_suffix+'.txt', mode = 'w') as file:
file.write(f'{self.img_path} - Breakthrough diameter = {diameter}')
print(time.perf_counter() - start)
elif op_suffix == '_FMCH':
start = time.perf_counter()
characterizations = operations.full_morphology_characterization(self.img)
with open(self.img_path[:-4]+op_suffix+'.txt', mode = 'w') as file:
for key, values in characterizations.items():
file.write(f'{key},{str(values)[1:-1]}\n')
print(time.perf_counter() - start)
#elif op_suffix == '_AAPP':
#elif op_suffix == '_SBPP':
messagebox.showinfo('Done', 'Done')
self.top_preview.withdraw()
self.root.iconify()
def create_preview_images(self):
if self.img.shape[2] > 5:
middle_slice = self.img.shape[2]//2
self.preview_vol = self.img[:, :, middle_slice-2 : middle_slice+3]
else:
self.preview_vol = self.img.copy()
self.preview_img = np.array(Image.fromarray(
self.preview_vol[:,:,0]).resize((300,300)))
self.preview_img = (self.preview_img/np.max(self.preview_img))*254
self.preview_img = self.preview_img.astype('uint8')
self.tk_img = ImageTk.PhotoImage(image=
Image.fromarray(self.preview_img))
self.cnv_preview.create_image((0,0), anchor="nw", image=self.tk_img)
def create_parameters_frame(self, frame, dict_parameters):
op = self.selected_operation.get()
op_suffix = self.operations_dictionary[op]['suffix']
if op_suffix == '_OTSU':
pass
elif op_suffix == '_WATE':
dict_parameters['compactness'] = tk.StringVar()
self.lbl_param_threshold = tk.Label(frame, text = 'Compactness: ')
self.ent_param_threshold = tk.Entry(frame,
textvariable = dict_parameters['compactness'])
self.lbl_param_threshold.grid(row=0, column = 0)
self.ent_param_threshold.grid(row=0, column = 1)
elif op_suffix == '_AOSE':
dict_parameters['threshold'] = tk.StringVar()
self.lbl_param_threshold = tk.Label(frame, text = 'Threshold: ')
self.ent_param_threshold = tk.Entry(frame,
textvariable = dict_parameters['threshold'])
self.lbl_param_threshold.grid(row=0, column = 0)
self.ent_param_threshold.grid(row=0, column = 1)
elif op_suffix == '_SHAP':
for i in ('volume', 'surface', 'hidraulic radius',
'equivalent diameter', 'irregularity'):
dict_parameters[i] = tk.IntVar()
dict_parameters[i].set(1)
tk.Checkbutton(frame, text=i, variable=dict_parameters[i]).pack(side= tk.TOP)
elif op_suffix == '_ESTL':
dict_parameters['step_size'] = tk.StringVar()
self.lbl_param_stepsize = tk.Label(frame, text = 'Step size: ')
self.ent_param_stepsize = tk.Entry(frame,
textvariable = dict_parameters['step_size'])
self.lbl_param_stepsize.grid(row = 0, column = 0)
self.ent_param_stepsize.grid(row = 0, column = 1)
elif op_suffix == '_RESC':
dict_parameters['factor'] = tk.StringVar()
self.lbl_param_factor = tk.Label(frame, text = 'Rescaling factor: ')
self.ent_param_factor = tk.Entry(frame,
textvariable = dict_parameters['factor'])
self.lbl_param_factor.grid(row = 0, column = 0)
self.ent_param_factor.grid(row = 0, column = 1)
elif op_suffix == '_BKDI':
dict_parameters['step'] = tk.StringVar()
dict_parameters['step'].set('0.1')
self.lbl_param_factor = tk.Label(frame, text = 'Erosion step: ')
self.ent_param_factor = tk.Entry(frame,
textvariable = dict_parameters['step'])
self.lbl_param_factor.grid(row = 0, column = 0)
self.ent_param_factor.grid(row = 0, column = 1)
elif op_suffix == '_FFSO':
#TODO
pass
#elif op_suffix == '_AAPP':
#elif op_suffix == '_AOSE':
#elif op_suffix == '_SKEL':
#elif op_suffix == '_SBPP':
def fill_text_preview(self, text_widget):
name = self.img_path.split('/')[-1]
dtype = str(self.img.dtype)
if 'int' in dtype and self.img.max() <= 1 and self.img.min() >=0:
binary = ' (binary)'
else:
binary = ''
shape = str(self.img.shape)
text_widget.config(text=f'{name}\n{dtype} {binary}\n{shape}')
def get_string(self, str_key):
if str_key in self.strings:
return self.strings[str_key]
else:
print('Missing string: ' + str_key)
return str_key
def convert_bmp_to_raw(self):
files = filedialog.askopenfilenames()
img, config, config_order = io.load_bmp_files(files)
out_path = ''
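        # Build the output path from the longest common prefix of the first
        # and last selected file names; e.g. 'slice_001.bmp' and
        # 'slice_100.bmp' would yield 'slice_' + '.raw' (illustrative example).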
for i in zip(files[0], files[-1]):
if i[0] == i[1]:
out_path += i[0]
else:
break
out_path += '.raw'
io.save_raw(out_path, img, config, config_order)
messagebox.showinfo('Done converting',
f'Raw image with config saved as {out_path}')
def start():
interface = Interface()
interface.root.mainloop()
|
the-stack_106_24014 | # coding=utf-8
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.testing_utils import require_tokenizers
from transformers.tokenization_funnel import VOCAB_FILES_NAMES
from .test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FunnelTokenizer
rust_tokenizer_class = FunnelTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
def setUp(self):
super().setUp()
vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_tokenizer(self, **kwargs):
return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_token_type_ids(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
inputs = tokenizer("UNwant\u00E9d,running")
sentence_len = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
|
the-stack_106_24018 | import cv2
import numpy as np
import posenet.constants
def valid_resolution(width, height, output_stride=16):
target_width = (int(width) // output_stride) * output_stride + 1
target_height = (int(height) // output_stride) * output_stride + 1
return target_width, target_height
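# Illustrative example: valid_resolution(640, 480, output_stride=16)
# returns (641, 481) - each dimension is floored to a multiple of the
# output stride, then 1 is added.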
def _process_input(source_img, scale_factor=1.0, output_stride=16):
target_width, target_height = valid_resolution(
source_img.shape[1] * scale_factor, source_img.shape[0] * scale_factor, output_stride=output_stride)
scale = np.array([source_img.shape[0] / target_height, source_img.shape[1] / target_width])
input_img = cv2.resize(source_img, (target_width, target_height), interpolation=cv2.INTER_LINEAR)
input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB).astype(np.float32)
input_img = input_img * (2.0 / 255.0) - 1.0
input_img = input_img.reshape(1, target_height, target_width, 3)
return input_img, source_img, scale
def read_cap(cap, scale_factor=1.0, output_stride=16, mirror_flip=False, use_webcam=True):
res, img = cap.read()
if not res:
if use_webcam:
raise IOError("webcam failure")
else:
return None, None, 1.0
else:
if mirror_flip:
img = cv2.flip(img, 1)
return _process_input(img, scale_factor, output_stride)
def read_imgfile(path, scale_factor=1.0, output_stride=16):
img = cv2.imread(path)
return _process_input(img, scale_factor, output_stride)
def draw_keypoints(
img, instance_scores, keypoint_scores, keypoint_coords,
min_pose_confidence=0.5, min_part_confidence=0.5):
cv_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_confidence:
continue
for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
if ks < min_part_confidence:
continue
cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
return out_img
def get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):
results = []
for left, right in posenet.CONNECTED_PART_INDICES:
if keypoint_scores[left] < min_confidence or keypoint_scores[right] < min_confidence:
continue
results.append(
np.array([keypoint_coords[left][::-1], keypoint_coords[right][::-1]]).astype(np.int32),
)
return results
def draw_skeleton(
img, instance_scores, keypoint_scores, keypoint_coords,
min_pose_confidence=0.5, min_part_confidence=0.5):
out_img = img
adjacent_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_confidence:
continue
new_keypoints = get_adjacent_keypoints(
keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_confidence)
adjacent_keypoints.extend(new_keypoints)
out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
return out_img
def draw_skel_and_kp(
img, instance_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.5, min_part_score=0.5):
out_img = img
adjacent_keypoints = []
cv_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_score:
continue
new_keypoints = get_adjacent_keypoints(
keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
adjacent_keypoints.extend(new_keypoints)
for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
if ks < min_part_score:
continue
cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
out_img = cv2.drawKeypoints(
out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
return out_img
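# Hypothetical usage sketch (illustrative only, not part of the original
# module): after a PoseNet model has produced pose scores, keypoint scores
# and keypoint coordinates for the preprocessed image, the overlay helpers
# above can be applied to the unscaled source image, e.g.
#
#     input_img, draw_img, output_scale = read_imgfile('pose.jpg', scale_factor=0.5)
#     # ...run the model on input_img and decode pose_scores, keypoint_scores,
#     # keypoint_coords (the decoding step is outside this module)...
#     # keypoint_coords may need to be multiplied by output_scale to map
#     # back onto draw_img.
#     overlay = draw_skel_and_kp(draw_img, pose_scores, keypoint_scores,
#                                keypoint_coords, min_pose_score=0.25)
#     cv2.imwrite('pose_overlay.jpg', overlay)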
|
the-stack_106_24019 | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from oslo_config import cfg
import sqlalchemy as sa
from sqlalchemy import sql
from neutron.conf.db import l3_gwmode_db
from neutron.db import l3_db
from neutron.db.models import l3 as l3_models
l3_gwmode_db.register_db_l3_gwmode_opts()
# Modify the Router Data Model adding the enable_snat attribute
setattr(l3_models.Router, 'enable_snat',
sa.Column(sa.Boolean, default=True, server_default=sql.true(),
nullable=False))
@resource_extend.has_resource_extenders
class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
"""Mixin class to add configurable gateway modes."""
@staticmethod
@resource_extend.extends([l3_apidef.ROUTERS])
def _extend_router_dict_gw_mode(router_res, router_db):
if router_db.gw_port_id:
nw_id = router_db.gw_port['network_id']
router_res[l3_apidef.EXTERNAL_GW_INFO].update({
'network_id': nw_id,
'enable_snat': router_db.enable_snat,
'external_fixed_ips': [
{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in router_db.gw_port['fixed_ips']
]
})
def _update_router_gw_info(self, context, router_id, info, router=None):
with db_api.CONTEXT_WRITER.using(context):
# Always load the router inside the DB context.
router = self._get_router(context, router_id)
old_router = self._make_router_dict(router)
router.enable_snat = self._get_enable_snat(info)
router_body = {l3_apidef.ROUTER:
{l3_apidef.EXTERNAL_GW_INFO: info}}
registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self,
payload=events.DBEventPayload(
context, request_body=router_body,
states=(old_router,), resource_id=router_id,
desired_state=router))
# Calls superclass, pass router db object for avoiding re-loading
super(L3_NAT_dbonly_mixin, self)._update_router_gw_info(
context, router_id, info, router=router)
# Returning the router might come back useful if this
# method is overridden in child classes
return self._get_router(context, router_id)
@staticmethod
def _get_enable_snat(info):
if info and 'enable_snat' in info:
return info['enable_snat']
# if enable_snat is not specified then use the default value
return cfg.CONF.enable_snat_by_default
def _build_routers_list(self, context, routers, gw_ports):
routers = super(L3_NAT_dbonly_mixin, self)._build_routers_list(
context, routers, gw_ports)
for rtr in routers:
gw_port_id = rtr['gw_port_id']
# Collect gw ports only if available
if gw_port_id and gw_ports.get(gw_port_id):
rtr['gw_port'] = gw_ports[gw_port_id]
# Add enable_snat key
rtr['enable_snat'] = rtr[
l3_apidef.EXTERNAL_GW_INFO]['enable_snat']
return routers
class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, l3_db.L3_NAT_db_mixin):
pass
|
the-stack_106_24020 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import time
import IECore
import Gaffer
import GafferTest
class ComputeNodeTest( GafferTest.TestCase ) :
def testOperation( self ) :
n1 = GafferTest.AddNode()
n1["sum"].getValue()
dirtiedPlugs = GafferTest.CapturingSlot( n1.plugDirtiedSignal() )
setPlugs = GafferTest.CapturingSlot( n1.plugSetSignal() )
n1["op1"].setValue( 2 )
self.assertEqual( len( setPlugs ), 1 )
self.assertEqual( len( dirtiedPlugs ), 2 )
self.assertEqual( setPlugs[0][0].fullName(), "AddNode.op1" )
self.assertEqual( dirtiedPlugs[0][0].fullName(), "AddNode.op1" )
self.assertEqual( dirtiedPlugs[1][0].fullName(), "AddNode.sum" )
n1["op2"].setValue( 3 )
self.assertEqual( len( setPlugs ), 2 )
self.assertEqual( setPlugs[1][0].fullName(), "AddNode.op2" )
del dirtiedPlugs[:]
del setPlugs[:]
# plug set or dirty signals are not emitted during computation
self.assertEqual( n1.getChild("sum").getValue(), 5 )
self.assertEqual( len( setPlugs ), 0 )
self.assertEqual( len( dirtiedPlugs ), 0 )
# connect another add node onto the output of this one
n2 = GafferTest.AddNode( "Add2" )
dirtiedPlugs2 = GafferTest.CapturingSlot( n2.plugDirtiedSignal() )
setPlugs2 = GafferTest.CapturingSlot( n2.plugSetSignal() )
n2["op1"].setInput( n1["sum"] )
# connecting a plug doesn't set the value of the input plug
# immediately - the value is transferred only upon request.
self.assertEqual( len( setPlugs2 ), 0 )
self.assertEqual( len( dirtiedPlugs2 ), 2 )
self.assertEqual( dirtiedPlugs2[0][0].fullName(), "Add2.op1" )
self.assertEqual( dirtiedPlugs2[1][0].fullName(), "Add2.sum" )
del dirtiedPlugs2[:]
del setPlugs2[:]
self.assertEqual( n2["op1"].getValue(), 5 )
self.assertEqual( n2["sum"].getValue(), 5 )
# plug set or dirty signals are not emitted during computation
self.assertEqual( len( setPlugs2 ), 0 )
self.assertEqual( len( dirtiedPlugs2 ), 0 )
def testDirtyOfInputsWithConnections( self ) :
n1 = GafferTest.AddNode( "n1" )
n2 = GafferTest.AddNode( "n2" )
dirtied = GafferTest.CapturingSlot( n1.plugDirtiedSignal(), n2.plugDirtiedSignal() )
n2["op1"].setInput( n1["sum"] )
self.assertEqual( len( dirtied ), 2 )
self.failUnless( dirtied[0][0].isSame( n2["op1"] ) )
self.failUnless( dirtied[1][0].isSame( n2["sum"] ) )
del dirtied[:]
n1["op1"].setValue( 10 )
self.assertEqual( len( dirtied ), 4 )
self.failUnless( dirtied[0][0].isSame( n1["op1"] ) )
self.failUnless( dirtied[1][0].isSame( n1["sum"] ) )
self.failUnless( dirtied[2][0].isSame( n2["op1"] ) )
self.failUnless( dirtied[3][0].isSame( n2["sum"] ) )
self.assertEqual( n2.getChild( "sum" ).getValue(), 10 )
def testDirtyPlugComputesSameValueAsBefore( self ) :
n1 = GafferTest.AddNode( "N1" )
n2 = GafferTest.AddNode( "N2" )
n2.getChild( "op1" ).setInput( n1.getChild( "sum" ) )
n1.getChild( "op1" ).setValue( 1 )
n1.getChild( "op2" ).setValue( -1 )
self.assertEqual( n2.getChild( "sum" ).getValue(), 0 )
def testOutputsDirtyForNewNodes( self ) :
n = GafferTest.AddNode()
n["op1"].setValue( 1 )
n["op2"].setValue( 2 )
self.assertEqual( n["sum"].getValue(), 3 )
def testComputeInContext( self ) :
n = GafferTest.FrameNode()
self.assertEqual( n["output"].getValue(), 1 )
c = Gaffer.Context()
c.setFrame( 10 )
with c :
self.assertEqual( n["output"].getValue(), 10 )
def testComputeInThreads( self ) :
n = GafferTest.FrameNode()
def f( frame ) :
c = Gaffer.Context()
c.setFrame( frame )
with c :
time.sleep( 0.01 )
self.assertEqual( n["output"].getValue(), frame )
threads = []
for i in range( 0, 1000 ) :
t = threading.Thread( target = f, args = ( i, ) )
t.start()
threads.append( t )
for t in threads :
t.join()
def testDirtyNotPropagatedDuringCompute( self ) :
n1 = GafferTest.AddNode( "n1" )
n2 = GafferTest.AddNode( "n2" )
n1["op1"].setValue( 2 )
n1["op2"].setValue( 3 )
n2["op1"].setInput( n1["sum"] )
dirtyCapturer = GafferTest.CapturingSlot( n2.plugDirtiedSignal() )
self.assertEqual( n2["sum"].getValue(), 5 )
self.assertEqual( len( dirtyCapturer ), 0 )
def testWrongPlugSet( self ) :
n = GafferTest.BadNode()
self.assertRaises( RuntimeError, n["out1"].getValue )
def testPlugNotSet( self ) :
n = GafferTest.BadNode()
self.assertRaises( RuntimeError, n["out3"].getValue )
def testHash( self ) :
n = GafferTest.MultiplyNode()
self.assertHashesValid( n )
def testHashForPythonDerivedClasses( self ) :
n = GafferTest.AddNode()
self.assertHashesValid( n )
def testDisableCaching( self ) :
n = GafferTest.CachingTestNode()
n["in"].setValue( "d" )
v1 = n["out"].getValue( _copy=False )
v2 = n["out"].getValue( _copy=False )
self.assertEqual( v1, v2 )
self.assertEqual( v1, IECore.StringData( "d" ) )
# the objects should be one and the same, as the second computation
# should have shortcut and returned a cached result.
self.failUnless( v1.isSame( v2 ) )
n["out"].setFlags( Gaffer.Plug.Flags.Cacheable, False )
v3 = n["out"].getValue( _copy=False )
self.assertEqual( v3, IECore.StringData( "d" ) )
self.assertEqual( v3, v1 )
# we disabled caching, so the two values should
# be distinct objects, even though they are equal.
self.failIf( v3.isSame( v1 ) )
def testConnectedPlugsShareHashesAndCacheEntries( self ) :
class Out( Gaffer.ComputeNode ) :
def __init__( self, name="Out" ) :
Gaffer.ComputeNode.__init__( self, name )
self.addChild( Gaffer.ObjectPlug( "oOut", Gaffer.Plug.Direction.Out, IECore.NullObject() ) )
self.addChild( Gaffer.FloatPlug( "fOut", Gaffer.Plug.Direction.Out ) )
def affects( self, input ) :
return []
def hash( self, output, context, h ) :
h.append( context.getFrame() )
def compute( self, plug, context ) :
if plug.getName() == "oOut" :
plug.setValue( IECore.IntData( int( context.getFrame() ) ) )
else :
plug.setValue( context.getFrame() )
IECore.registerRunTimeTyped( Out )
class In( Gaffer.ComputeNode ) :
def __init__( self, name="In" ) :
Gaffer.ComputeNode.__init__( self, name )
self.addChild( Gaffer.ObjectPlug( "oIn", Gaffer.Plug.Direction.In, IECore.NullObject() ) )
self.addChild( Gaffer.IntPlug( "iIn", Gaffer.Plug.Direction.In ) )
IECore.registerRunTimeTyped( In )
nOut = Out()
nIn = In()
nIn["oIn"].setInput( nOut["oOut"] )
nIn["iIn"].setInput( nOut["fOut"] )
for i in range( 0, 1000 ) :
c = Gaffer.Context()
c.setFrame( i )
with c :
# because oIn and oOut are connected, they should
# have the same hash and share the exact same value.
self.assertEqual( nIn["oIn"].getValue(), IECore.IntData( i ) )
self.assertEqual( nOut["oOut"].getValue(), IECore.IntData( i ) )
self.assertEqual( nIn["oIn"].hash(), nOut["oOut"].hash() )
self.failUnless( nIn["oIn"].getValue( _copy=False ).isSame( nOut["oOut"].getValue( _copy=False ) ) )
# even though iIn and fOut are connected, they should have
# different hashes and different values, because type conversion
# (float to int) is performed when connecting them.
self.assertEqual( nIn["iIn"].getValue(), i )
self.assertEqual( nOut["fOut"].getValue(), float( i ) )
self.assertNotEqual( nIn["iIn"].hash(), nOut["fOut"].hash() )
class PassThrough( Gaffer.ComputeNode ) :
def __init__( self, name="PassThrough", inputs={}, dynamicPlugs=() ) :
Gaffer.ComputeNode.__init__( self, name )
self.addChild( Gaffer.ObjectPlug( "in", Gaffer.Plug.Direction.In, IECore.NullObject() ) )
self.addChild( Gaffer.ObjectPlug( "out", Gaffer.Plug.Direction.Out, IECore.NullObject() ) )
def affects( self, input ) :
if input.isSame( self["in"] ) :
return [ self["out"] ]
return []
def hash( self, output, context, h ) :
assert( output.isSame( self["out"] ) )
# by assigning directly to the hash rather than appending,
# we signify that we'll pass through the value unchanged.
h.copyFrom( self["in"].hash() )
def compute( self, plug, context ) :
assert( plug.isSame( self["out"] ) )
plug.setValue( self["in"].getValue( _copy=False ), _copy=False )
IECore.registerRunTimeTyped( PassThrough )
def testPassThroughSharesHashes( self ) :
n = self.PassThrough()
n["in"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) )
self.assertEqual( n["in"].hash(), n["out"].hash() )
self.assertEqual( n["in"].getValue(), n["out"].getValue() )
def testPassThroughSharesCacheEntries( self ) :
n = self.PassThrough()
n["in"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) )
# this fails because TypedObjectPlug::setValue() currently does a copy. i think we can
# optimise things by allowing a copy-free setValue() function for use during computations.
self.failUnless( n["in"].getValue( _copy=False ).isSame( n["out"].getValue( _copy=False ) ) )
def testInternalConnections( self ) :
a = GafferTest.AddNode()
a["op1"].setValue( 10 )
n = Gaffer.Node()
n["in"] = Gaffer.IntPlug()
n["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
n["out"].setInput( n["in"] )
n["in"].setInput( a["sum"] )
self.assertEqual( n["out"].getValue(), a["sum"].getValue() )
self.assertEqual( n["out"].hash(), a["sum"].hash() )
def testErrorSignal( self ) :
b = GafferTest.BadNode()
a = GafferTest.AddNode()
a["op1"].setInput( b["out3"] )
cs = GafferTest.CapturingSlot( b.errorSignal() )
self.assertRaises( RuntimeError, b["out1"].getValue )
self.assertEqual( len( cs ), 1 )
self.assertTrue( cs[0][0].isSame( b["out1"] ) )
self.assertTrue( cs[0][1].isSame( b["out1"] ) )
self.assertTrue( isinstance( cs[0][2], str ) )
self.assertRaises( RuntimeError, a["sum"].getValue )
self.assertEqual( len( cs ), 2 )
self.assertTrue( cs[1][0].isSame( b["out3"] ) )
self.assertTrue( cs[1][1].isSame( b["out3"] ) )
self.assertTrue( isinstance( cs[1][2], str ) )
def testErrorSignalledOnIntermediateNodes( self ) :
nodes = [ GafferTest.BadNode() ]
for i in range( 0, 10 ) :
nodes.append( GafferTest.AddNode() )
nodes[-1]["op1"].setInput(
nodes[-2]["sum"] if i != 0 else nodes[-2]["out3"]
)
slots = [ GafferTest.CapturingSlot( n.errorSignal() ) for n in nodes ]
self.assertRaises( RuntimeError, nodes[-1]["sum"].getValue )
for i, slot in enumerate( slots ) :
self.assertEqual( len( slot ), 1 )
self.assertTrue( slot[0][0].isSame( nodes[i]["out3"] if i == 0 else nodes[i]["sum"] ) )
self.assertTrue( slot[0][1].isSame( nodes[0]["out3"] ) )
def testErrorSignalledAtScopeTransitions( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["b"] = GafferTest.BadNode()
s["b"]["a"] = GafferTest.AddNode()
s["b"]["a"]["op1"].setInput( s["b"]["b"]["out3"] )
css = GafferTest.CapturingSlot( s.errorSignal() )
csb = GafferTest.CapturingSlot( s["b"].errorSignal() )
csbb = GafferTest.CapturingSlot( s["b"]["b"].errorSignal() )
p = s["b"].promotePlug( s["b"]["a"]["sum"] )
self.assertRaises( RuntimeError, p.getValue )
self.assertEqual( len( css ), 0 )
self.assertEqual( len( csb ), 1 )
self.assertTrue( csb[0][0].isSame( p ) )
self.assertTrue( csb[0][1].isSame( s["b"]["b"]["out3"] ) )
self.assertEqual( len( csbb ), 1 )
self.assertTrue( csbb[0][0].isSame( s["b"]["b"]["out3"] ) )
self.assertTrue( csbb[0][1].isSame( s["b"]["b"]["out3"] ) )
def testErrorSlotsDontSeeException( self ) :
self.fRan = False
def f( *unusedArgs ) :
# If there's an active python exception (from
# the error in BadNode below) when we try this
# import, it'll appear (falsely) as if the error
# originated from the import, and throw an exception
# here. This is not the intention - error slots are
# just meant to be informed of the error, without
# ever seeing the exception itself.
import IECore
self.fRan = True
n = GafferTest.BadNode()
c = n.errorSignal().connect( f )
with IECore.IgnoredExceptions( Exception ) :
n["out1"].getValue()
self.assertTrue( self.fRan )
def testThreading( self ) :
GafferTest.testComputeNodeThreading()
if __name__ == "__main__":
unittest.main()
|
the-stack_106_24022 | # Test the Corpus module.
import sys
import time
import unittest
import sb_test_support
sb_test_support.fix_sys_path()
from spambayes.Corpus import Corpus, ExpiryCorpus, MessageFactory
# We borrow the test messages that test_sb_server uses.
from test_sb_server import good1, spam1, malformed1
class simple_msg(object):
def __init__(self, key):
self._key = key
self.creation_time = time.time()
self.loaded = False
def createTimestamp(self):
return self.creation_time
def key(self):
return self._key
def load(self):
self.loaded = True
class simple_observer(object):
# Just want to tell that they have been called, so raise particular
# errors.
def onAddMessage(self, msg, flags):
raise ValueError()
def onRemoveMessage(self, msg, flags):
raise TypeError()
class CorpusTest(unittest.TestCase):
def setUp(self):
self.factory = MessageFactory()
self.cacheSize = 100
self.corpus = Corpus(self.factory, self.cacheSize)
def test___init__(self):
self.assertEqual(self.corpus.cacheSize, self.cacheSize)
self.assertEqual(self.corpus.msgs, {})
self.assertEqual(self.corpus.keysInMemory, [])
self.assertEqual(self.corpus.observers, [])
self.assertEqual(self.corpus.factory, self.factory)
def test_addObserver(self):
self.corpus.addObserver(simple_observer())
self.assertRaises(ValueError, self.corpus.addMessage,
simple_msg(0))
self.assertRaises(TypeError, self.corpus.removeMessage,
simple_msg(1))
def test_addMessage(self):
msg = simple_msg(0)
self.assertEqual(self.corpus.get(0), None)
self.corpus.addMessage(msg)
self.assertEqual(self.corpus[0], msg)
def test_removeMessage(self):
msg = simple_msg(0)
self.assertEqual(self.corpus.get(0), None)
self.corpus.addMessage(msg)
self.assertEqual(self.corpus[0], msg)
self.corpus.removeMessage(msg)
self.assertEqual(self.corpus.get(0), None)
def test_cacheMessage(self):
msg = simple_msg(0)
self.corpus.cacheMessage(msg)
self.assertEqual(self.corpus.msgs[0], msg)
self.assert_(0 in self.corpus.keysInMemory)
def test_flush_cache(self):
self.corpus.cacheSize = 1
msg = simple_msg(0)
self.corpus.cacheMessage(msg)
self.assertEqual(self.corpus.msgs[0], msg)
self.assert_(0 in self.corpus.keysInMemory)
msg = simple_msg(1)
self.corpus.cacheMessage(msg)
self.assertEqual(self.corpus.msgs[1], msg)
self.assert_(1 in self.corpus.keysInMemory)
self.assert_(0 not in self.corpus.keysInMemory)
def test_unCacheMessage(self):
msg = simple_msg(0)
self.corpus.cacheMessage(msg)
self.assertEqual(self.corpus.msgs[0], msg)
self.assert_(0 in self.corpus.keysInMemory)
self.corpus.unCacheMessage(msg)
self.assert_(0 in self.corpus.keysInMemory)
def test_takeMessage(self):
other_corpus = Corpus(self.factory, self.cacheSize)
msg = simple_msg(0)
other_corpus.addMessage(msg)
self.assertEqual(self.corpus.get(0), None)
self.corpus.takeMessage(0, other_corpus)
self.assertEqual(msg.loaded, True)
self.assertEqual(other_corpus.get(0), None)
self.assertEqual(self.corpus.get(0), msg)
def test_get(self):
ids = [0, 1, 2]
for id in ids:
self.corpus.addMessage(simple_msg(id))
self.assertEqual(self.corpus.get(0).key(), 0)
def test_get_fail(self):
ids = [0, 1, 2]
for id in ids:
self.corpus.addMessage(simple_msg(id))
self.assertEqual(self.corpus.get(4), None)
def test_get_default(self):
ids = [0, 1, 2]
for id in ids:
self.corpus.addMessage(simple_msg(id))
self.assertEqual(self.corpus.get(4, "test"), "test")
def test___getitem__(self):
ids = [0, 1, 2]
for id in ids:
self.corpus.addMessage(simple_msg(id))
self.assertEqual(self.corpus[0].key(), 0)
def test___getitem___fail(self):
ids = [0, 1, 2]
for id in ids:
self.corpus.addMessage(simple_msg(id))
self.assertRaises(KeyError, self.corpus.__getitem__, 4)
def test_keys(self):
self.assertEqual(self.corpus.keys(), [])
ids = [0, 1, 2]
for id in ids:
self.corpus.addMessage(simple_msg(id))
self.assertEqual(self.corpus.keys(), ids)
def test___iter__(self):
self.assertEqual(tuple(self.corpus), ())
msgs = (simple_msg(0), simple_msg(1), simple_msg(2))
for msg in msgs:
self.corpus.addMessage(msg)
self.assertEqual(tuple(self.corpus), msgs)
def test_makeMessage_no_content(self):
key = "testmessage"
self.assertRaises(NotImplementedError, self.corpus.makeMessage, key)
def test_makeMessage_with_content(self):
key = "testmessage"
content = good1
self.assertRaises(NotImplementedError, self.corpus.makeMessage,
key, content)
class ExpiryCorpusTest(unittest.TestCase):
def setUp(self):
class Mixed(Corpus, ExpiryCorpus):
def __init__(self, expireBefore, factory, cacheSize):
Corpus.__init__(self, factory, cacheSize)
ExpiryCorpus.__init__(self, expireBefore)
self.factory = MessageFactory()
self.cacheSize = 100
self.expireBefore = 10.0
self.corpus = Mixed(self.expireBefore, self.factory,
self.cacheSize)
def test___init___expiry(self):
self.assertEqual(self.corpus.expireBefore, self.expireBefore)
def test_removeExpiredMessages(self):
# Put messages in to expire.
expire = [simple_msg(1), simple_msg(2)]
for msg in expire:
self.corpus.addMessage(msg)
# Ensure that we don't expire the wrong ones.
self.corpus.expireBefore = 0.25
time.sleep(0.5)
# Put messages in to not expire.
not_expire = [simple_msg(3), simple_msg(4)]
for msg in not_expire:
self.corpus.addMessage(msg)
# Run expiry.
self.corpus.removeExpiredMessages()
# Check that expired messages are gone.
for msg in expire:
self.assertEqual(msg in self.corpus, False)
# Check that not expired messages are still there.
for msg in not_expire:
self.assertEqual(msg in self.corpus, True)
def suite():
suite = unittest.TestSuite()
clses = (CorpusTest,
ExpiryCorpusTest,
)
for cls in clses:
suite.addTest(unittest.makeSuite(cls))
return suite
if __name__=='__main__':
sb_test_support.unittest_main(argv=sys.argv + ['suite'])
|
the-stack_106_24023 | import json
import unittest
from flask_caching import Cache
from sqlalchemy import asc
from app import app, db
from apps.comments.models import CommentsSongs
from apps.songs.models import Songs
from apps.users.models import Users, UsersAccessLevels, UsersAccessMapping, UsersAccessTokens
from apps.utils.time import get_datetime, get_datetime_one_hour_ahead
class TestCommentsSongsView(unittest.TestCase):
def setUp(self):
# Clear redis cache completely
cache = Cache()
cache.init_app(app, config={"CACHE_TYPE": "RedisCache"})
with app.app_context():
cache.clear()
self.app = app.test_client()
# Add three songs
song1 = Songs(
Title="UnitTest1",
Duration=123
)
song2 = Songs(
Title="UnitTest2",
Duration=123
)
song3 = Songs(
Title="UnitTest3",
Duration=123
)
db.session.add(song1)
db.session.add(song2)
db.session.add(song3)
db.session.commit()
# Add two registered users
user1 = Users(
Name="UnitTest1",
Username="unittester1",
Password="unittest1",
Created=get_datetime(),
)
user2 = Users(
Name="UnitTest2",
Username="unittester2",
Password="unittest2",
Created=get_datetime(),
)
db.session.add(user1)
db.session.add(user2)
db.session.commit()
# Add user level for registered users, if not already
if not UsersAccessLevels.query.filter_by(LevelName="Registered").first():
registered = UsersAccessLevels(
UsersAccessLevelID=2,
LevelName="Registered"
)
db.session.add(registered)
db.session.commit()
register_user1 = UsersAccessMapping(
UserID=user1.UserID,
UsersAccessLevelID=2
)
register_user2 = UsersAccessMapping(
UserID=user2.UserID,
UsersAccessLevelID=2
)
self.access_token1 = "unittest1-access-token"
self.access_token2 = "unittest2-access-token"
user1_token = UsersAccessTokens(
UserID=user1.UserID,
AccessToken=self.access_token1,
ExpirationDate=get_datetime_one_hour_ahead()
)
user2_token = UsersAccessTokens(
UserID=user2.UserID,
AccessToken=self.access_token2,
ExpirationDate=get_datetime_one_hour_ahead()
)
db.session.add(register_user1)
db.session.add(register_user2)
db.session.add(user1_token)
db.session.add(user2_token)
db.session.commit()
self.valid_users = [user1.UserID, user2.UserID]
self.valid_tokens = [self.access_token1, self.access_token2]
self.song_ids = [song1.SongID, song2.SongID, song3.SongID]
# Add some comments for each song
s1_comment1 = CommentsSongs(
SongID=self.song_ids[0],
Comment="S1C1 Comment",
UserID=self.valid_users[0],
Created=get_datetime()
)
s1_comment2 = CommentsSongs(
SongID=self.song_ids[0],
Comment="S1C2 Comment",
UserID=self.valid_users[1],
Created=get_datetime()
)
s2_comment1 = CommentsSongs(
SongID=self.song_ids[1],
Comment="S2C1 Comment",
UserID=self.valid_users[0],
Created=get_datetime()
)
s3_comment1 = CommentsSongs(
SongID=self.song_ids[2],
Comment="S3C1 Comment",
UserID=self.valid_users[0],
Created=get_datetime()
)
db.session.add(s1_comment1)
db.session.add(s1_comment2)
db.session.add(s2_comment1)
db.session.add(s3_comment1)
db.session.commit()
self.valid_comment_ids = [
s1_comment1.CommentID,
s1_comment2.CommentID,
s2_comment1.CommentID,
s3_comment1.CommentID,
]
self.valid_comment_ids_userid = [
s1_comment1.UserID,
s1_comment2.UserID,
s2_comment1.UserID,
s3_comment1.UserID,
]
def tearDown(self):
# Deleting a song will also delete the comments for it
for song in Songs.query.all():
db.session.delete(song)
db.session.commit()
for user in Users.query.filter(Users.Username.like("unittest%")).all():
db.session.delete(user)
db.session.commit()
access = UsersAccessLevels.query.filter_by(LevelName="Registered").first()
db.session.delete(access)
db.session.commit()
def test_getting_all_comments(self):
"""Should return the current votes for all songs."""
response = self.app.get("/api/1.0/comments/songs/")
data = json.loads(response.data.decode())
self.assertEqual(200, response.status_code)
self.assertNotEqual(None, data)
self.assertEqual(4, len(data["comments"]))
self.assertTrue(data["comments"][0]["songID"] in self.song_ids)
self.assertTrue(data["comments"][1]["songID"] in self.song_ids)
self.assertTrue(data["comments"][2]["songID"] in self.song_ids)
self.assertTrue(data["comments"][3]["songID"] in self.song_ids)
def test_getting_comments_for_one_song(self):
"""Should return the comments for the specified song."""
response = self.app.get("/api/1.0/comments/songs/{}".format(self.song_ids[0]))
data = json.loads(response.data.decode())
self.assertEqual(200, response.status_code)
self.assertNotEqual(None, data)
self.assertEqual(2, len(data["comments"]))
self.assertEqual(self.song_ids[0], data["comments"][0]["songID"])
self.assertEqual("UnitTest1", data["comments"][0]["name"])
self.assertEqual("S1C1 Comment", data["comments"][0]["comment"])
self.assertEqual("UnitTest2", data["comments"][1]["name"])
self.assertEqual("S1C2 Comment", data["comments"][1]["comment"])
def test_adding_a_comment_as_registered_user(self):
"""Should add a new comment with the userID."""
response = self.app.post(
"/api/1.0/comments/songs/",
data=json.dumps(
dict(
songID=self.song_ids[2],
comment="S3 UnitTest Brand New"
)
),
content_type="application/json",
headers={
"User": self.valid_users[1],
"Authorization": self.valid_tokens[1]
}
)
comments = CommentsSongs.query.filter_by(SongID=self.song_ids[2]).order_by(
asc(CommentsSongs.CommentID)
).all()
self.assertEqual(201, response.status_code)
self.assertEqual(2, len(comments))
self.assertEqual("S3C1 Comment", comments[0].Comment)
self.assertEqual("S3 UnitTest Brand New", comments[1].Comment)
self.assertEqual(self.valid_users[1], comments[1].UserID)
def test_adding_a_comment_as_registered_user_with_invalid_token(self):
"""Should throw a 401, since it is an invalid case."""
response = self.app.post(
"/api/1.0/comments/songs/",
data=json.dumps(
dict(
songID=self.song_ids[2],
comment="S3 UnitTest Comment Same",
)
),
content_type="application/json",
headers={
"User": self.valid_users[0],
"Authorization": "not valid"
}
)
        comments = CommentsSongs.query.filter_by(SongID=self.song_ids[2]).order_by(
            asc(CommentsSongs.CommentID)
        ).all()
        self.assertEqual(401, response.status_code)
        self.assertEqual(1, len(comments))
        self.assertEqual("S3C1 Comment", comments[0].Comment)
def test_adding_another_comment_as_registered_user_for_same_song(self):
"""Should add a second comment normally."""
response = self.app.post(
"/api/1.0/comments/songs/",
data=json.dumps(
dict(
songID=self.song_ids[2],
comment="S3 UnitTest Comment Same",
)
),
content_type="application/json",
headers={
"User": self.valid_users[0],
"Authorization": self.valid_tokens[0]
}
)
comments = CommentsSongs.query.filter_by(SongID=self.song_ids[2]).order_by(
asc(CommentsSongs.CommentID)
).all()
self.assertEqual(201, response.status_code)
self.assertEqual(2, len(comments))
self.assertEqual("S3C1 Comment", comments[0].Comment)
self.assertEqual("S3 UnitTest Comment Same", comments[1].Comment)
def test_editing_a_comment(self):
"""Should modify an existing comment."""
response = self.app.put(
"api/1.0/comments/songs/{}".format(self.song_ids[0]),
data=json.dumps(
dict(
commentID=self.valid_comment_ids[0],
comment="UnitTest Edited"
)
),
content_type="application/json",
headers={
"User": self.valid_users[0],
"Authorization": self.valid_tokens[0]
}
)
comments = CommentsSongs.query.filter_by(SongID=self.song_ids[0]).order_by(
asc(CommentsSongs.CommentID)
).all()
self.assertEqual(200, response.status_code)
self.assertEqual(2, len(comments))
self.assertEqual("UnitTest Edited", comments[0].Comment)
self.assertEqual("S1C2 Comment", comments[1].Comment)
def test_editing_a_comment_without_comment_id(self):
"""Should return 400 Bad Request."""
response = self.app.put(
"api/1.0/comments/songs/{}".format(self.song_ids[0]),
data=json.dumps(
dict(
comment="UnitTest Edited"
)
),
content_type="application/json",
headers={
"User": self.valid_users[0],
"Authorization": self.valid_tokens[0]
}
)
self.assertEqual(400, response.status_code)
def test_editing_a_comment_without_comment(self):
"""Should return 400 Bad Request."""
response = self.app.put(
"api/1.0/comments/songs/{}".format(self.song_ids[0]),
data=json.dumps(
dict(
commentID=self.valid_comment_ids[0]
)
),
content_type="application/json",
headers={
"User": self.valid_users[0],
"Authorization": self.valid_tokens[0]
}
)
self.assertEqual(400, response.status_code)
def test_editing_a_comment_with_wrong_userid(self):
"""Should return 401 Unauthorized. You can only edit your own comments."""
response = self.app.put(
"api/1.0/comments/songs/{}".format(self.song_ids[1]),
data=json.dumps(
dict(
commentID=self.valid_comment_ids[1],
comment="UnitTest Edited"
)
),
content_type="application/json",
headers={
"User": self.valid_comment_ids_userid[0],
"Authorization": self.valid_tokens[0]
}
)
self.assertEqual(401, response.status_code)
def test_deleting_a_comment(self):
"""Should delete the comment."""
response = self.app.delete(
"api/1.0/comments/songs/{}".format(self.song_ids[0]),
data=json.dumps(
dict(
commentID=self.valid_comment_ids[0]
)
),
content_type="application/json",
headers={
"User": self.valid_comment_ids_userid[0],
"Authorization": self.valid_tokens[0]
}
)
comments = CommentsSongs.query.filter_by(SongID=self.song_ids[0]).all()
self.assertEqual(204, response.status_code)
self.assertEqual(1, len(comments))
self.assertEqual("S1C2 Comment", comments[0].Comment)
def test_deleting_a_comment_with_invalid_comment_id(self):
"""Should return 400 Bad Request."""
response = self.app.delete(
"api/1.0/comments/songs/{}".format(self.song_ids[0]),
data=json.dumps(
dict(
commentID=None
)
),
content_type="application/json",
headers={
"User": self.valid_comment_ids_userid[0],
"Authorization": self.valid_tokens[0]
}
)
self.assertEqual(400, response.status_code)
def test_deleting_a_comment_with_invalid_user_id(self):
"""Should return 401 Unauthorized."""
response = self.app.delete(
"api/1.0/comments/songs/{}".format(self.song_ids[0]),
data=json.dumps(
dict(
commentID=self.valid_comment_ids[0]
)
),
content_type="application/json",
headers={
"User": self.valid_comment_ids_userid[1],
"Authorization": self.valid_tokens[1]
}
)
self.assertEqual(401, response.status_code)
|
the-stack_106_24024 | # coding: utf-8
import pprint
import six
from enum import Enum
class User:
swagger_types = {
'id': 'int',
'planned_purge_date': 'datetime',
'scope': 'Scope',
'state': 'CreationEntityState',
'user_type': 'UserType',
'version': 'int',
}
attribute_map = {
        'id': 'id',
        'planned_purge_date': 'plannedPurgeDate',
        'scope': 'scope',
        'state': 'state',
        'user_type': 'userType',
        'version': 'version',
}
_id = None
_planned_purge_date = None
_scope = None
_state = None
_user_type = None
_version = None
def __init__(self, **kwargs):
self.discriminator = None
self.id = kwargs.get('id', None)
self.planned_purge_date = kwargs.get('planned_purge_date', None)
self.scope = kwargs.get('scope', None)
self.state = kwargs.get('state', None)
self.user_type = kwargs.get('user_type', None)
self.version = kwargs.get('version', None)
@property
def id(self):
"""Gets the id of this User.
The ID is the primary key of the entity. The ID identifies the entity uniquely.
:return: The id of this User.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this User.
The ID is the primary key of the entity. The ID identifies the entity uniquely.
:param id: The id of this User.
:type: int
"""
self._id = id
@property
def planned_purge_date(self):
"""Gets the planned_purge_date of this User.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
:return: The planned_purge_date of this User.
:rtype: datetime
"""
return self._planned_purge_date
@planned_purge_date.setter
def planned_purge_date(self, planned_purge_date):
"""Sets the planned_purge_date of this User.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
:param planned_purge_date: The planned_purge_date of this User.
:type: datetime
"""
self._planned_purge_date = planned_purge_date
@property
def scope(self):
"""Gets the scope of this User.
:return: The scope of this User.
:rtype: Scope
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this User.
:param scope: The scope of this User.
:type: Scope
"""
self._scope = scope
@property
def state(self):
"""Gets the state of this User.
:return: The state of this User.
:rtype: CreationEntityState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this User.
:param state: The state of this User.
:type: CreationEntityState
"""
self._state = state
@property
def user_type(self):
"""Gets the user_type of this User.
:return: The user_type of this User.
:rtype: UserType
"""
return self._user_type
@user_type.setter
def user_type(self, user_type):
"""Sets the user_type of this User.
:param user_type: The user_type of this User.
:type: UserType
"""
self._user_type = user_type
@property
def version(self):
"""Gets the version of this User.
The version number indicates the version of the entity. The version is incremented whenever the entity is changed.
:return: The version of this User.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this User.
The version number indicates the version of the entity. The version is incremented whenever the entity is changed.
:param version: The version of this User.
:type: int
"""
self._version = version
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(User, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, User):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
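# Illustrative usage sketch (not part of the generated SDK module). The scope,
# state and user_type fields would normally hold SDK model/enum instances; plain
# values are used here only to show the keyword constructor and serialization.
if __name__ == "__main__":
    user = User(id=42, version=1, state="ACTIVE", user_type="HUMAN_USER")
    print(user.to_dict())
    print(user.to_str())
    print(user == User(id=42, version=1, state="ACTIVE", user_type="HUMAN_USER"))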
|
the-stack_106_24025 | from rubicon.objc import (
CGPoint,
objc_method
)
from travertino.size import at_least
from toga_iOS.libs import (
NSLayoutAttributeBottom,
NSLayoutAttributeLeading,
NSLayoutAttributeTop,
NSLayoutAttributeTrailing,
NSLayoutConstraint,
NSLayoutRelationEqual,
UILabel,
UITextView,
)
from toga_iOS.widgets.base import Widget
class TogaMultilineTextView(UITextView):
@objc_method
def pointInside_withEvent_(self, point: CGPoint, event) -> bool:
# To keep consistency with non-mobile platforms, we'll resign the
# responder status when you tap somewhere else outside this view
# (except the keyboard)
within_x = point.x > 0 and point.x < self.frame.size.width
within_y = point.y > 0 and point.y < self.frame.size.height
in_view = within_x and within_y
if not in_view:
self.resignFirstResponder()
return in_view
@objc_method
def textViewShouldEndEditing_(self, text_view):
return True
@objc_method
def textViewDidBeginEditing_(self, text_view):
self.placeholder_label.setHidden_(True)
@objc_method
def textViewDidEndEditing_(self, text_view):
self.placeholder_label.setHidden_(len(text_view.text) > 0)
class MultilineTextInput(Widget):
def create(self):
self.native = TogaMultilineTextView.alloc().init()
self.native.delegate = self.native
# Placeholder isn't natively supported, so we create our
# own
self.placeholder_label = UILabel.alloc().init()
self.placeholder_label.translatesAutoresizingMaskIntoConstraints = False
self.placeholder_label.font = self.native.font
self.placeholder_label.alpha = 0.5
self.native.addSubview_(self.placeholder_label)
self.constrain_placeholder_label()
# Delegate needs to update the placeholder depending on
# input, so we give it just that to avoid a retain cycle
self.native.placeholder_label = self.placeholder_label
self.add_constraints()
def constrain_placeholder_label(self):
leading_constraint = \
NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
self.placeholder_label,
NSLayoutAttributeLeading,
NSLayoutRelationEqual,
self.native,
NSLayoutAttributeLeading,
1.0,
4.0
)
trailing_constraint = \
NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
self.placeholder_label,
NSLayoutAttributeTrailing,
NSLayoutRelationEqual,
self.native,
NSLayoutAttributeTrailing,
1.0,
0
)
top_constraint = \
NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
self.placeholder_label,
NSLayoutAttributeTop,
NSLayoutRelationEqual,
self.native,
NSLayoutAttributeTop,
1.0,
8.0
)
bottom_constraint = \
NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
self.placeholder_label,
NSLayoutAttributeBottom,
NSLayoutRelationEqual,
self.native,
NSLayoutAttributeBottom,
1.0,
0
)
self.native.addConstraints_([
leading_constraint,
trailing_constraint,
top_constraint,
bottom_constraint
])
def set_placeholder(self, value):
self.placeholder_label.text = value
def set_readonly(self, value):
self.native.editable = not self.interface._readonly
def set_value(self, value):
self.native.text = self.interface._value
self.placeholder_label.setHidden_(len(self.native.text) > 0)
def get_value(self):
return self.native.text
def rehint(self):
self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
def set_font(self, value):
if value:
self.native.font = value._impl.native
self.placeholder_label.font = value._impl.native
|
the-stack_106_24026 | import json
import threading
import time
from typing import Union, List, Set, Dict
import attr
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
class BinanceMarketDataMessage:
@staticmethod
def from_dict(data: dict, obj_type):
obj = obj_type()
attributes = {
'desc': ('e', str),
'symbol': ('s', str),
'close': ('c', float),
'open': ('o', float),
'price': ('p', float),
'quantity': ('q', float),
'high': ('h', float),
'low': ('l', float),
'first_trade_id': ('f', float),
'is_buyer_market_marker': ('m', bool),
'order_book_update_id': ('u', int),
'best_bid_price': ('b', float),
'best_bid_qty': ('B', float),
'best_ask_price': ('a', float),
'best_ask_qty': ('A', float),
'last_trade_id': ('l', float),
'trade_time': ('T', str),
'total_traded_base_asset_volume': ('v', float),
'total_traded_quote_asset_volume': ('q', float),
'number_of_trades': ('n', float),
'close_time': ('C', str),
'open_time': ('O', str),
'last_quantity': ('Q', float),
'vwap': ('w', float),
'price_change_pct': ('P', float),
'price_change': ('p', float),
'event_time': ('E', str)
}
for attribute, key in attributes.items():
k, attr_type = key
if k in data:
setattr(obj, attribute, attr_type(data[k]))
return obj
def __str__(self):
message_type = type(self).__name__
return message_type + ' - ' + json.dumps(self.__dict__)
@attr.s()
class BinanceTicker:
symbol = attr.ib(type=str)
best_bid_price = attr.ib(type=float)
best_ask_price = attr.ib(type=float)
best_bid_qty = attr.ib(type=float)
best_ask_qty = attr.ib(type=float)
def __str__(self):
return f'futures | {self.symbol} | {self.best_bid_qty:.4f} @ {self.best_bid_price:.2f} | ' \
f'{self.best_ask_qty:.4f} @ {self.best_ask_price:.2f}'
def same_prices(self, obj1):
return obj1.best_bid_price == self.best_bid_price and obj1.best_ask_price == self.best_ask_price
class AggTrade(BinanceMarketDataMessage):
pass
class Ticker(BinanceMarketDataMessage):
pass
class MiniTicker(BinanceMarketDataMessage):
pass
class BookTicker(BinanceMarketDataMessage):
pass
class BinanceMarketDataFuturesAPI:
def __init__(self):
self.binance_websocket_api_manager = BinanceWebSocketApiManager(exchange='binance.com-futures')
@property
def markets(self):
# noinspection SpellCheckingInspection
return {'btcusdt', 'bchusdt', 'ethusdt'}
def register_channels(self, channels: Union[str, List, Set], markets: Union[str, List, Set]):
self.binance_websocket_api_manager.create_stream(channels, markets)
def start(self, on_new_message=None):
threading.Thread(target=self.handle_messages, name='binance_futures_handle_messages',
args=(on_new_message,)).start()
@staticmethod
def on_new_message(data):
print(data)
def handle_messages(self, on_new_message=None):
if on_new_message is None:
on_new_message = self.on_new_message
while True:
if self.binance_websocket_api_manager.is_manager_stopping():
exit(0)
data = self.binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if data is False:
time.sleep(0.01)
else:
data_as_json = json.loads(data)
if 'stream' in data_as_json:
on_new_message(data_as_json)
class BinanceFuturesBBO:
def __init__(self, symbols: Union[str, List] = 'btcusdt'):
# ticker: refreshed once per second. (1000ms).
# miniTicker: refreshed once per second (1000ms).
# aggTrade: The Aggregate Trade Streams push trade information that is aggregated for a single taker order (RT)
# bookTicker: Pushes any update to the best bid or ask's price or quantity in real-time
# for a specified symbol (RT)
if isinstance(symbols, str):
symbols = [symbols]
self.symbols = [s.lower() for s in symbols]
self.messages_type = {}
for s in self.symbols:
self.messages_type.update({
f'{s}@ticker': Ticker,
f'{s}@bookTicker': BookTicker,
f'{s}@miniTicker': MiniTicker,
f'{s}@aggTrade': AggTrade,
})
self.api = BinanceMarketDataFuturesAPI()
self.api.register_channels(['bookTicker'], self.symbols)
self.api.start(on_new_message=self.on_new_message)
self._last_update = {}
while len(self._last_update) != len(self.symbols):
time.sleep(0.001)
def on_new_message(self, data):
message_type = self.messages_type[data['stream']]
payload = data['data']
msg = BinanceMarketDataMessage.from_dict(payload, message_type)
symbol = msg.symbol.lower()
self._last_update[symbol] = msg
def ticker(self, symbol) -> BinanceTicker:
symbol = symbol.lower()
return BinanceTicker(
symbol=symbol,
best_bid_price=self._last_update[symbol].best_bid_price,
best_ask_price=self._last_update[symbol].best_ask_price,
best_bid_qty=self._last_update[symbol].best_bid_qty,
best_ask_qty=self._last_update[symbol].best_ask_qty,
)
@property
def tickers(self) -> Dict[str, BinanceTicker]:
return {s: self.ticker(s) for s in self.symbols}
def print_on_ticker_update(self):
last_tickers = self.tickers
while True:
new_tickers = self.tickers
for symbol in new_tickers:
if not new_tickers[symbol].same_prices(last_tickers[symbol]):
print(new_tickers[symbol])
last_tickers[symbol] = new_tickers[symbol]
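# Illustrative usage sketch (not part of the original module). It needs live
# network access to the Binance futures websocket API, so treat it as indicative
# rather than a tested entry point.
if __name__ == "__main__":
    bbo = BinanceFuturesBBO(symbols=["btcusdt", "ethusdt"])
    print(bbo.ticker("btcusdt"))   # one BinanceTicker snapshot
    print(bbo.tickers)             # snapshots for every subscribed symbol
    bbo.print_on_ticker_update()   # then stream best bid/ask changes forever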
|
the-stack_106_24028 | from bokeh.plotting import figure, output_file, save
# prepare some data
x = [1, 2, 3, 4, 5]
y = [4, 5, 5, 7, 2]
# set output to static HTML file
output_file(filename="custom_filename.html", title="Static HTML file")
# create a new plot with a specific size
p = figure(sizing_mode="stretch_width", max_width=500, plot_height=250)
# add a circle renderer
circle = p.circle(x, y, fill_color="red", size=15)
# save the results to a file
save(p)
|
the-stack_106_24031 | #!/usr/bin/python
"""
modules.py
"""
from __future__ import print_function
import os
from mylib import log
from testpkg import module1
from testpkg.module2 import func2
def run_tests():
# type: () -> None
module1.func1()
func2()
dog = Dog('white')
dog.Speak()
cat = module1.Cat()
cat.Speak()
cat2 = Sphinx('brown')
cat2.Speak()
# Test inheritance
cat = cat2
cat.Speak()
cat.AbstractMethod()
def run_benchmarks():
# type: () -> None
i = 0
n = 2000000
result = 0
while i < n:
result += module1.fortytwo()
i = i + 1
log('result = %d', result)
# This is at the bottom to detect order.
class Dog(object):
def __init__(self, color):
# type: (str) -> None
self.color = color
def Speak(self):
# type: () -> None
log('%s dog: meow', self.color)
class Sphinx(module1.Cat):
def __init__(self, color):
# type: (str) -> None
module1.Cat.__init__(self)
self.color = color
def Speak(self):
# type: () -> None
log('%s sphinx', self.color)
def AbstractMethod(self):
# type: () -> None
log('abstract')
if __name__ == '__main__':
if os.getenv('BENCHMARK'):
log('Benchmarking...')
run_benchmarks()
else:
run_tests()
|
the-stack_106_24033 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import client_factory
from azure.cli.core import profiles
from msrestazure import tools
def get_ssh_ip(cmd, resource_group, vm_name, use_private_ip):
compute_client = client_factory.get_mgmt_service_client(cmd.cli_ctx, profiles.ResourceType.MGMT_COMPUTE)
network_client = client_factory.get_mgmt_service_client(cmd.cli_ctx, profiles.ResourceType.MGMT_NETWORK)
vm_client = compute_client.virtual_machines
nic_client = network_client.network_interfaces
ip_client = network_client.public_ip_addresses
vm = vm_client.get(resource_group, vm_name)
for nic_ref in vm.network_profile.network_interfaces:
parsed_id = tools.parse_resource_id(nic_ref.id)
nic = nic_client.get(parsed_id['resource_group'], parsed_id['name'])
for ip_config in nic.ip_configurations:
if use_private_ip and ip_config.private_ip_address:
return ip_config.private_ip_address
public_ip_ref = ip_config.public_ip_address
parsed_ip_id = tools.parse_resource_id(public_ip_ref.id)
public_ip = ip_client.get(parsed_ip_id['resource_group'], parsed_ip_id['name'])
if public_ip.ip_address:
return public_ip.ip_address
return None
|
the-stack_106_24036 | _base_ = [
'../_base_/models/setr_mlala_convfuse.py',
'../_base_/datasets/cityscapes_768x768_foggy.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_80k.py'
]
model = dict(
backbone=dict(img_size=768,pos_embed_interp=True, drop_rate=0.,mla_channels=256,
model_name='deit_base_distilled_path16_384', mla_index=(2,5,8,11), embed_dim=768, depth=12, num_heads=12),
decode_head=dict(img_size=768,mla_channels=256,mlahead_channels=128,num_classes=19,
        # parameters from the official documentation
sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000),
),
auxiliary_head=[
dict(
type='VIT_MLA_AUXIHead',
in_channels=256,
channels=512,
in_index=0,
img_size=768,
num_classes=19,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='VIT_MLA_AUXIHead',
in_channels=256,
channels=512,
in_index=1,
img_size=768,
num_classes=19,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4,
                # class weights from DeepLabV3
class_weight=[0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754,
1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037,
1.0865, 1.0955, 1.0865, 1.1529, 1.0507]
)),
dict(
type='VIT_MLA_AUXIHead',
in_channels=256,
channels=512,
in_index=2,
img_size=768,
num_classes=19,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='VIT_MLA_AUXIHead',
in_channels=256,
channels=512,
in_index=3,
img_size=768,
num_classes=19,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
])
optimizer = dict(lr=0.001, weight_decay=0.0,
paramwise_cfg = dict(custom_keys={'head': dict(lr_mult=2.)})
)
crop_size = (768, 768)
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(512, 512))
find_unused_parameters = True
data = dict(samples_per_gpu=2)
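# Usage note (assumption, not part of the original config): in an
# MMSegmentation-style checkout this file is consumed by the stock entry
# points rather than imported directly, roughly:
#   python tools/train.py <path/to/this_config>.py
#   python tools/test.py <path/to/this_config>.py <checkpoint>.pth --eval mIoU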
|
the-stack_106_24037 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from zoo.automl.search.abstract import *
import numpy as np
from ray import tune
import json
class Recipe(metaclass=ABCMeta):
"""
Recipe
"""
def __init__(self):
# ----- runtime parameters
self.training_iteration = 1
self.num_samples = 1
self.reward_metric = None
@abstractmethod
def search_space(self, all_available_features):
pass
def runtime_params(self):
runtime_config = {
"training_iteration": self.training_iteration,
"num_samples": self.num_samples,
}
if self.reward_metric is not None:
runtime_config["reward_metric"] = self.reward_metric
return runtime_config
def manual_search_space(self):
return None
class SmokeRecipe(Recipe):
"""
A very simple Recipe for smoke test that runs one epoch and one iteration
with only 1 random sample.
"""
def __init__(self):
super(self.__class__, self).__init__()
def search_space(self, all_available_features):
return {
"selected_features": json.dumps(all_available_features),
"model": "LSTM",
"lstm_1_units": tune.choice([32, 64]),
"dropout_1": tune.uniform(0.2, 0.5),
"lstm_2_units": tune.choice([32, 64]),
"dropout_2": tune.uniform(0.2, 0.5),
"lr": 0.001,
"batch_size": 1024,
"epochs": 1,
"past_seq_len": 2,
}
class MTNetSmokeRecipe(Recipe):
"""
A very simple Recipe for smoke test that runs one epoch and one iteration
with only 1 random sample.
"""
def __init__(self):
super(self.__class__, self).__init__()
def search_space(self, all_available_features):
return {
"selected_features": json.dumps(all_available_features),
"model": "MTNet",
"lr": 0.001,
"batch_size": 16,
"epochs": 1,
"cnn_dropout": 0.2,
"rnn_dropout": 0.2,
"time_step": tune.choice([3, 4]),
"cnn_height": 2,
"long_num": tune.choice([3, 4]),
"ar_size": tune.choice([2, 3]),
"past_seq_len": tune.sample_from(lambda spec:
(spec.config.long_num + 1) * spec.config.time_step),
}
class TCNSmokeRecipe(Recipe):
"""
A very simple Recipe for smoke test that runs one epoch and one iteration
with only 1 random sample.
"""
def __init__(self):
super(self.__class__, self).__init__()
def search_space(self, all_available_features):
return {
"lr": 0.001,
"batch_size": 16,
"nhid": 8,
"levels": 8,
"kernel_size": 3,
"dropout": 0.1
}
class PastSeqParamHandler:
"""
Utility to handle PastSeq Param
"""
def __init__(self):
pass
@staticmethod
def get_past_seq_config(look_back):
"""
generate pass sequence config based on look_back
:param look_back: look_back configuration
:return: search configuration for past sequence
"""
        if isinstance(look_back, tuple) and len(look_back) == 2 and \
                isinstance(look_back[0], int) and isinstance(look_back[1], int):
if look_back[1] < 2:
raise ValueError(
"The max look back value should be at least 2")
if look_back[0] < 2:
print(
"The input min look back value is smaller than 2. "
"We sample from range (2, {}) instead.".format(
look_back[1]))
past_seq_config = tune.randint(look_back[0], look_back[1] + 1)
elif isinstance(look_back, int):
if look_back < 2:
raise ValueError(
"look back value should not be smaller than 2. "
"Current value is ", look_back)
past_seq_config = look_back
else:
raise ValueError(
"look back is {}.\n "
"look_back should be either a tuple with 2 int values:"
" (min_len, max_len) or a single int".format(look_back))
return past_seq_config
class GridRandomRecipe(Recipe):
"""
A recipe involves both grid search and random search.
tsp = TimeSequencePredictor(...,recipe = GridRandomRecipe(1))
"""
def __init__(
self,
num_rand_samples=1,
look_back=2,
epochs=5,
training_iteration=10):
"""
Constructor.
:param num_rand_samples: number of hyper-param configurations sampled randomly
:param look_back: the length to look back, either a tuple with 2 int values,
which is in format is (min len, max len), or a single int, which is
a fixed length to look back.
:param training_iteration: no. of iterations for training (n epochs) in trials
:param epochs: no. of epochs to train in each iteration
"""
super(self.__class__, self).__init__()
self.num_samples = num_rand_samples
self.training_iteration = training_iteration
self.past_seq_config = PastSeqParamHandler.get_past_seq_config(
look_back)
self.epochs = epochs
def search_space(self, all_available_features):
return {
# -------- feature related parameters
"selected_features": tune.sample_from(lambda spec:
json.dumps(
list(np.random.choice(
all_available_features,
size=np.random.randint(
low=3,
high=len(all_available_features)),
replace=False)))),
# -------- model selection TODO add MTNet
"model": tune.choice(["LSTM", "Seq2seq"]),
# --------- Vanilla LSTM model parameters
"lstm_1_units": tune.grid_search([16, 32]),
"dropout_1": 0.2,
"lstm_2_units": tune.grid_search([16, 32]),
"dropout_2": tune.uniform(0.2, 0.5),
# ----------- Seq2Seq model parameters
"latent_dim": tune.grid_search([32, 64]),
"dropout": tune.uniform(0.2, 0.5),
            # ----------- optimization parameters
            "lr": tune.uniform(0.001, 0.01),
            "batch_size": tune.choice([32, 64]),
"epochs": self.epochs,
"past_seq_len": self.past_seq_config,
}
class LSTMGridRandomRecipe(Recipe):
"""
A recipe involves both grid search and random search, only for LSTM.
tsp = TimeSequencePredictor(...,recipe = LSTMGridRandomRecipe(1))
"""
def __init__(
self,
num_rand_samples=1,
epochs=5,
training_iteration=10,
look_back=2,
lstm_1_units=[16, 32, 64, 128],
lstm_2_units=[16, 32, 64],
batch_size=[32, 64]):
"""
Constructor.
:param lstm_1_units: random search candidates for num of lstm_1_units
:param lstm_2_units: grid search candidates for num of lstm_1_units
:param batch_size: grid search candidates for batch size
:param num_rand_samples: number of hyper-param configurations sampled randomly
:param look_back: the length to look back, either a tuple with 2 int values,
which is in format is (min len, max len), or a single int, which is
a fixed length to look back.
:param training_iteration: no. of iterations for training (n epochs) in trials
:param epochs: no. of epochs to train in each iteration
"""
super(self.__class__, self).__init__()
# -- runtime params
self.num_samples = num_rand_samples
self.training_iteration = training_iteration
# -- model params
self.past_seq_config = PastSeqParamHandler.get_past_seq_config(
look_back)
self.lstm_1_units_config = tune.choice(lstm_1_units)
self.lstm_2_units_config = tune.grid_search(lstm_2_units)
self.dropout_2_config = tune.uniform(0.2, 0.5)
# -- optimization params
self.lr = tune.uniform(0.001, 0.01)
self.batch_size = tune.grid_search(batch_size)
self.epochs = epochs
def search_space(self, all_available_features):
return {
# -------- feature related parameters
"selected_features": tune.sample_from(lambda spec:
json.dumps(
list(np.random.choice(
all_available_features,
size=np.random.randint(
low=3,
high=len(all_available_features) + 1),
replace=False)))),
"model": "LSTM",
# --------- Vanilla LSTM model parameters
"lstm_1_units": self.lstm_1_units_config,
"dropout_1": 0.2,
"lstm_2_units": self.lstm_2_units_config,
"dropout_2": self.dropout_2_config,
# ----------- optimization parameters
"lr": self.lr,
"batch_size": self.batch_size,
"epochs": self.epochs,
"past_seq_len": self.past_seq_config,
}
class Seq2SeqRandomRecipe(Recipe):
"""
A recipe involves both grid search and random search, only for LSTM.
tsp = TimeSequencePredictor(...,recipe = LSTMGridRandomRecipe(1))
"""
def __init__(
self,
num_rand_samples=1,
epochs=5,
training_iteration=10,
look_back=2,
latent_dim=[32, 64, 128, 256],
batch_size=[32, 64]):
"""
Constructor.
:param lstm_1_units: random search candidates for num of lstm_1_units
:param lstm_2_units: grid search candidates for num of lstm_1_units
:param batch_size: grid search candidates for batch size
:param num_rand_samples: number of hyper-param configurations sampled randomly
:param look_back: the length to look back, either a tuple with 2 int values,
which is in format is (min len, max len), or a single int, which is
a fixed length to look back.
:param training_iteration: no. of iterations for training (n epochs) in trials
:param epochs: no. of epochs to train in each iteration
"""
super(self.__class__, self).__init__()
# -- runtime params
self.num_samples = num_rand_samples
self.training_iteration = training_iteration
# -- model params
self.past_seq_config = PastSeqParamHandler.get_past_seq_config(
look_back)
self.latent_dim = tune.choice(latent_dim)
self.dropout_config = tune.uniform(0.2, 0.5)
# -- optimization params
self.lr = tune.uniform(0.001, 0.01)
self.batch_size = tune.grid_search(batch_size)
self.epochs = epochs
def search_space(self, all_available_features):
return {
# -------- feature related parameters
"selected_features": tune.sample_from(lambda spec:
json.dumps(
list(np.random.choice(
all_available_features,
size=np.random.randint(
low=3,
high=len(all_available_features) + 1),
replace=False)))),
"model": "Seq2Seq",
"latent_dim": self.latent_dim,
"dropout": self.dropout_config,
# ----------- optimization parameters
"lr": self.lr,
"batch_size": self.batch_size,
"epochs": self.epochs,
"past_seq_len": self.past_seq_config,
}
class MTNetGridRandomRecipe(Recipe):
"""
Grid+Random Recipe for MTNet
"""
def __init__(self,
num_rand_samples=1,
epochs=5,
training_iteration=10,
time_step=[3, 4],
long_num=[3, 4],
cnn_height=[2, 3],
cnn_hid_size=[32, 50, 100],
ar_size=[2, 3],
batch_size=[32, 64]):
"""
Constructor.
:param num_rand_samples: number of hyper-param configurations sampled randomly
:param training_iteration: no. of iterations for training (n epochs) in trials
:param epochs: no. of epochs to train in each iteration
:param time_step: random search candidates for model param "time_step"
:param long_num: random search candidates for model param "long_num"
:param ar_size: random search candidates for model param "ar_size"
:param batch_size: grid search candidates for batch size
:param cnn_height: random search candidates for model param "cnn_height"
:param cnn_hid_size: random search candidates for model param "cnn_hid_size"
"""
super(self.__class__, self).__init__()
# -- run time params
self.num_samples = num_rand_samples
self.training_iteration = training_iteration
# -- optimization params
self.lr = tune.uniform(0.001, 0.01)
self.batch_size = tune.grid_search(batch_size)
self.epochs = epochs
# ---- model params
self.cnn_dropout = tune.uniform(0.2, 0.5)
self.rnn_dropout = tune.uniform(0.2, 0.5)
self.time_step = tune.choice(time_step)
self.long_num = tune.choice(long_num,)
self.cnn_height = tune.choice(cnn_height)
self.cnn_hid_size = tune.choice(cnn_hid_size)
self.ar_size = tune.choice(ar_size)
self.past_seq_len = tune.sample_from(
lambda spec: (
spec.config.long_num + 1) * spec.config.time_step)
def search_space(self, all_available_features):
return {
"selected_features": tune.sample_from(lambda spec:
json.dumps(
list(np.random.choice(
all_available_features,
size=np.random.randint(
low=3,
high=len(all_available_features)),
replace=False)))),
"model": "MTNet",
"lr": self.lr,
"batch_size": self.batch_size,
"epochs": self.epochs,
"cnn_dropout": self.cnn_dropout,
"rnn_dropout": self.rnn_dropout,
"time_step": self.time_step,
"long_num": self.long_num,
"ar_size": self.ar_size,
"past_seq_len": self.past_seq_len,
"cnn_hid_size": self.cnn_hid_size,
"cnn_height": self.cnn_height
}
class TCNGridRandomRecipe(Recipe):
"""
Grid+Random Recipe for TCN
"""
# TODO: use some more generalized exp hyperparameters
def __init__(self,
num_rand_samples=1,
training_iteration=40,
batch_size=[256, 512],
hidden_size=[32, 48],
levels=[6, 8],
kernel_size=[3, 5],
dropout=[0, 0.1],
lr=[0.001, 0.003]
):
"""
Constructor.
:param num_rand_samples: number of hyper-param configurations sampled randomly
:param training_iteration: no. of iterations for training (n epochs) in trials
:param batch_size: grid search candidates for batch size
:param hidden_size: grid search candidates for hidden size of each layer
:param levels: the number of layers
:param kernel_size: the kernel size of each layer
:param dropout: dropout rate (1 - keep probability)
:param lr: learning rate
"""
super(self.__class__, self).__init__()
# -- run time params
self.num_samples = num_rand_samples
self.training_iteration = training_iteration
# -- optimization params
self.lr = tune.choice(lr)
self.batch_size = tune.grid_search(batch_size)
# ---- model params
self.hidden_size = tune.grid_search(hidden_size)
self.levels = tune.grid_search(levels)
self.kernel_size = tune.grid_search(kernel_size)
self.dropout = tune.choice(dropout)
def search_space(self, all_available_features):
return {
"lr": self.lr,
"batch_size": self.batch_size,
"nhid": self.hidden_size,
"levels": self.levels,
"kernel_size": self.kernel_size,
"dropout": self.dropout
}
class RandomRecipe(Recipe):
"""
Pure random sample Recipe. Often used as baseline.
tsp = TimeSequencePredictor(...,recipe = RandomRecipe(5))
"""
def __init__(
self,
num_rand_samples=1,
look_back=2,
epochs=5,
reward_metric=-0.05,
training_iteration=10):
"""
:param num_rand_samples: number of hyper-param configurations sampled randomly
:param look_back:the length to look back, either a tuple with 2 int values,
which is in format is (min len, max len), or a single int, which is
a fixed length to look back.
:param reward_metric: the rewarding metric value, when reached, stop trial
:param training_iteration: no. of iterations for training (n epochs) in trials
:param epochs: no. of epochs to train in each iteration
"""
super(self.__class__, self).__init__()
self.num_samples = num_rand_samples
self.reward_metric = reward_metric
self.training_iteration = training_iteration
self.epochs = epochs
self.past_seq_config = PastSeqParamHandler.get_past_seq_config(
look_back)
def search_space(self, all_available_features):
import random
return {
# -------- feature related parameters
"selected_features": tune.sample_from(lambda spec:
json.dumps(
list(np.random.choice(
all_available_features,
size=np.random.randint(
low=3,
high=len(all_available_features)),
replace=False)))),
"model": tune.choice(["LSTM", "Seq2seq"]),
# --------- Vanilla LSTM model parameters
"lstm_1_units": tune.choice([8, 16, 32, 64, 128]),
"dropout_1": tune.uniform(0.2, 0.5),
"lstm_2_units": tune.choice([8, 16, 32, 64, 128]),
"dropout_2": tune.uniform(0.2, 0.5),
# ----------- Seq2Seq model parameters
"latent_dim": tune.choice([32, 64, 128, 256]),
"dropout": tune.uniform(0.2, 0.5),
            # ----------- optimization parameters
            "lr": tune.uniform(0.001, 0.01),
            "batch_size": tune.choice([32, 64, 1024]),
"epochs": self.epochs,
"past_seq_len": self.past_seq_config,
}
class BayesRecipe(Recipe):
"""
A Bayes search Recipe. (Experimental)
tsp = TimeSequencePredictor(...,recipe = BayesRecipe(5))
"""
def __init__(
self,
num_samples=1,
look_back=2,
epochs=5,
reward_metric=-0.05,
training_iteration=5):
"""
Constructor
:param num_samples: number of hyper-param configurations sampled
:param look_back: the length to look back, either a tuple with 2 int values,
which is in format is (min len, max len), or a single int, which is
a fixed length to look back.
:param reward_metric: the rewarding metric value, when reached, stop trial
:param training_iteration: no. of iterations for training (n epochs) in trials
:param epochs: no. of epochs to train in each iteration
"""
super(self.__class__, self).__init__()
self.num_samples = num_samples
self.reward_metric = reward_metric
self.training_iteration = training_iteration
self.epochs = epochs
if isinstance(look_back, tuple) and len(look_back) == 2 and \
isinstance(look_back[0], int) and isinstance(look_back[1], int):
if look_back[1] < 2:
raise ValueError("The max look back value should be at least 2")
if look_back[0] < 2:
print("The input min look back value is smaller than 2. "
"We sample from range (2, {}) instead.".format(look_back[1]))
self.bayes_past_seq_config = {"past_seq_len_float": look_back}
self.fixed_past_seq_config = {}
elif isinstance(look_back, int):
if look_back < 2:
raise ValueError(
"look back value should not be smaller than 2. "
"Current value is ", look_back)
self.bayes_past_seq_config = {}
self.fixed_past_seq_config = {"past_seq_len": look_back}
else:
raise ValueError(
"look back is {}.\n "
"look_back should be either a tuple with 2 int values:"
" (min_len, max_len) or a single int".format(look_back))
def manual_search_space(self):
model_space = {
# --------- model parameters
"lstm_1_units_float": (8, 128),
"dropout_1": (0.2, 0.5),
"lstm_2_units_float": (8, 128),
"dropout_2": (0.2, 0.5),
# ----------- optimization parameters
"lr": (0.001, 0.01),
"batch_size_log": (5, 10),
}
total_space = model_space.copy()
total_space.update(self.bayes_past_seq_config)
return total_space
def search_space(self, all_available_features):
total_fixed_params = {
"epochs": self.epochs,
"model": "LSTM",
"selected_features": json.dumps(all_available_features),
# "batch_size": 1024,
}
total_fixed_params.update(self.fixed_past_seq_config)
return total_fixed_params
class XgbRegressorGridRandomRecipe(Recipe):
def __init__(
self,
num_rand_samples=1,
n_estimators=[8, 15],
max_depth=[10, 15],
n_jobs=-1,
tree_method='hist',
random_state=2,
seed=0,
lr=(1e-4, 1e-1),
subsample=0.8,
colsample_bytree=0.8,
min_child_weight=[1, 2, 3],
gamma=0,
reg_alpha=0,
reg_lambda=1):
"""
"""
super(self.__class__, self).__init__()
self.num_samples = num_rand_samples
self.n_jobs = n_jobs
self.tree_method = tree_method
self.random_state = random_state
self.seed = seed
self.colsample_bytree = colsample_bytree
self.gamma = gamma
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.n_estimators = tune.grid_search(n_estimators)
self.max_depth = tune.grid_search(max_depth)
self.lr = tune.loguniform(lr[0], lr[-1])
self.subsample = subsample
self.min_child_weight = tune.choice(min_child_weight)
def search_space(self, all_available_features):
return {
# -------- feature related parameters
"model": "XGBRegressor",
"imputation": tune.choice(["LastFillImpute", "FillZeroImpute"]),
"n_estimators": self.n_estimators,
"max_depth": self.max_depth,
"min_child_weight": self.min_child_weight,
"lr": self.lr
}
class XgbRegressorSkOptRecipe(Recipe):
def __init__(
self,
num_rand_samples=10,
n_estimators_range=(50, 1000),
max_depth_range=(2, 15),
):
"""
"""
super(self.__class__, self).__init__()
self.num_samples = num_rand_samples
self.n_estimators_range = n_estimators_range
self.max_depth_range = max_depth_range
def search_space(self, all_available_features):
space = {
"n_estimators": tune.randint(self.n_estimators_range[0],
self.n_estimators_range[1]),
"max_depth": tune.randint(self.max_depth_range[0],
self.max_depth_range[1]),
}
return space
def opt_params(self):
from skopt.space import Integer
params = [
Integer(self.n_estimators_range[0], self.n_estimators_range[1]),
Integer(self.max_depth_range[0], self.max_depth_range[1]),
]
return params
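# Illustrative usage sketch (not part of the original module): build a recipe,
# generate its Ray Tune search space for some made-up feature names, and print
# the runtime parameters the searcher would receive.
if __name__ == "__main__":
    recipe = LSTMGridRandomRecipe(num_rand_samples=2, look_back=(3, 6))
    candidate_features = ["hour", "day_of_week", "lag_1", "lag_2"]  # hypothetical
    space = recipe.search_space(candidate_features)
    print(sorted(space.keys()))
    print(recipe.runtime_params())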
|
the-stack_106_24040 |
def readconfig(fn: str):
    """Parse a simple 'key : value' config file into a dict.

    Blank lines and lines starting with '#' are skipped, trailing '# ...'
    comments are stripped, and a trailing backslash joins an entry with the
    next line.
    """
    with open(fn, "r") as fp:
        buf = ""
        out = {}
        while True:
            line = fp.readline()
            if line == "":
                break
            # skip blank lines and full-line comments
            if line.strip() == "" or line.strip()[0] == "#":
                continue
            # drop trailing comments
            if "#" in line:
                line = line[:line.index("#")]
            buf += line.strip()
            # a trailing backslash continues the entry on the next line
            if line.strip()[-1] == "\\":
                buf = buf[:-1]
                continue
            # split on the first ':' only, so values may contain colons
            k, v = [j.strip() for j in buf.split(":", 1)]
            out[k] = v
            buf = ""
        return out
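# Illustrative usage sketch (not part of the original module): write a small
# config exercising comments and backslash continuation, then parse it.
if __name__ == "__main__":
    import os
    import tempfile
    sample = "# demo config\nhost : 127.0.0.1\nports : 8080, \\\n        8443  # continued line\n"
    with tempfile.NamedTemporaryFile("w", suffix=".conf", delete=False) as tmp:
        tmp.write(sample)
    try:
        print(readconfig(tmp.name))   # {'host': '127.0.0.1', 'ports': '8080, 8443'}
    finally:
        os.remove(tmp.name)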
|
the-stack_106_24042 | # coding: utf-8
import numpy as np
from common_function import *
from util import im2col, col2im
class Relu:
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
dx = dout
return dx
class Sigmoid:
def __init__(self):
self.out = None
def forward(self, x):
out = sigmoid(x)
self.out = out
return out
def backward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
class Affine:
def __init__(self, W, b):
self.W =W
self.b = b
self.x = None
self.original_x_shape = None
        # gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
        # tensor support: remember the original shape and flatten to 2-D
        self.original_x_shape = x.shape
        x = x.reshape(x.shape[0], -1)
self.x = x
out = np.dot(self.x, self.W) + self.b
return out
def backward(self, dout):
dx = np.dot(dout, self.W.T)
self.dW = np.dot(self.x.T, dout)
self.db = np.sum(dout, axis=0)
        dx = dx.reshape(*self.original_x_shape)  # restore the input data shape (tensor support)
return dx
class SoftmaxWithLoss:
def __init__(self):
self.loss = None
        self.y = None  # output of softmax
        self.t = None  # training labels
def forward(self, x, t):
self.t = t
self.y = softmax(x)
self.loss = cross_entropy_error(self.y, self.t)
return self.loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
        if self.t.size == self.y.size:  # when the labels are one-hot vectors
dx = (self.y - self.t) / batch_size
else:
dx = self.y.copy()
dx[np.arange(batch_size), self.t] -= 1
dx = dx / batch_size
return dx
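# Illustrative sketch (not part of the original module): how Affine, Relu and
# SoftmaxWithLoss chain into a tiny forward/backward pass on random data. The
# function is only defined here for reference and is never called at import time.
def _demo_affine_relu_softmax():
    rng = np.random.RandomState(0)
    x = rng.randn(4, 10)                       # batch of 4 samples, 10 features
    t = np.eye(3)[rng.randint(0, 3, size=4)]   # one-hot targets for 3 classes
    affine1 = Affine(0.01 * rng.randn(10, 5), np.zeros(5))
    relu = Relu()
    affine2 = Affine(0.01 * rng.randn(5, 3), np.zeros(3))
    loss_layer = SoftmaxWithLoss()
    loss = loss_layer.forward(affine2.forward(relu.forward(affine1.forward(x))), t)
    dx = affine1.backward(relu.backward(affine2.backward(loss_layer.backward())))
    return loss, dx.shape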
class Dropout:
"""
http://arxiv.org/abs/1207.0580
"""
def __init__(self, dropout_ratio=0.5):
self.dropout_ratio = dropout_ratio
self.mask = None
def forward(self, x, train_flg=True):
if train_flg:
self.mask = np.random.rand(*x.shape) > self.dropout_ratio
return x * self.mask
else:
return x * (1.0 - self.dropout_ratio)
def backward(self, dout):
return dout * self.mask
class BatchNormalization:
"""
http://arxiv.org/abs/1502.03167
"""
def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
self.gamma = gamma
self.beta = beta
self.momentum = momentum
        self.input_shape = None  # 4-D for conv layers, 2-D for fully connected layers
        # mean and variance used at test (inference) time
self.running_mean = running_mean
self.running_var = running_var
        # intermediate data used by backward
self.batch_size = None
self.xc = None
self.std = None
self.dgamma = None
self.dbeta = None
def forward(self, x, train_flg=True):
self.input_shape = x.shape
if x.ndim != 2:
N, C, H, W = x.shape
x = x.reshape(N, -1)
out = self.__forward(x, train_flg)
return out.reshape(*self.input_shape)
def __forward(self, x, train_flg):
if self.running_mean is None:
N, D = x.shape
self.running_mean = np.zeros(D)
self.running_var = np.zeros(D)
if train_flg:
mu = x.mean(axis=0)
xc = x - mu
var = np.mean(xc**2, axis=0)
std = np.sqrt(var + 10e-7)
xn = xc / std
self.batch_size = x.shape[0]
self.xc = xc
self.xn = xn
self.std = std
self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu
self.running_var = self.momentum * self.running_var + (1-self.momentum) * var
else:
xc = x - self.running_mean
xn = xc / ((np.sqrt(self.running_var + 10e-7)))
out = self.gamma * xn + self.beta
return out
def backward(self, dout):
if dout.ndim != 2:
N, C, H, W = dout.shape
dout = dout.reshape(N, -1)
dx = self.__backward(dout)
dx = dx.reshape(*self.input_shape)
return dx
def __backward(self, dout):
dbeta = dout.sum(axis=0)
dgamma = np.sum(self.xn * dout, axis=0)
dxn = self.gamma * dout
dxc = dxn / self.std
dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
dvar = 0.5 * dstd / self.std
dxc += (2.0 / self.batch_size) * self.xc * dvar
dmu = np.sum(dxc, axis=0)
dx = dxc - dmu / self.batch_size
self.dgamma = dgamma
self.dbeta = dbeta
return dx
class Convolution:
def __init__(self, W, b, stride=1, pad=0):
self.W = W
self.b = b
self.stride = stride
self.pad = pad
        # intermediate data (used by backward)
self.x = None
self.col = None
self.col_W = None
        # gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
FN, C, FH, FW = self.W.shape
N, C, H, W = x.shape
out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
out_w = 1 + int((W + 2*self.pad - FW) / self.stride)
col = im2col(x, FH, FW, self.stride, self.pad)
col_W = self.W.reshape(FN, -1).T
out = np.dot(col, col_W) + self.b
out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
self.x = x
self.col = col
self.col_W = col_W
return out
def backward(self, dout):
FN, C, FH, FW = self.W.shape
dout = dout.transpose(0,2,3,1).reshape(-1, FN)
self.db = np.sum(dout, axis=0)
self.dW = np.dot(self.col.T, dout)
self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)
dcol = np.dot(dout, self.col_W.T)
dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)
return dx
class Pooling:
def __init__(self, pool_h, pool_w, stride=1, pad=0):
self.pool_h = pool_h
self.pool_w = pool_w
self.stride = stride
self.pad = pad
self.x = None
self.arg_max = None
def forward(self, x):
N, C, H, W = x.shape
out_h = int(1 + (H - self.pool_h) / self.stride)
out_w = int(1 + (W - self.pool_w) / self.stride)
col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
col = col.reshape(-1, self.pool_h*self.pool_w)
arg_max = np.argmax(col, axis=1)
out = np.max(col, axis=1)
out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
self.x = x
self.arg_max = arg_max
return out
def backward(self, dout):
dout = dout.transpose(0, 2, 3, 1)
pool_size = self.pool_h * self.pool_w
dmax = np.zeros((dout.size, pool_size))
dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
dmax = dmax.reshape(dout.shape + (pool_size,))
dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)
        return dx
|
the-stack_106_24043 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'recipes_project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('recipes.urls')),
)
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'media/(?P<path>.*)',
'serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
the-stack_106_24044 | #/usr/bin/env python
try:
from flask import Flask
except ImportError:
print("\n[X] Please install Flask:")
print(" $ pip install flask\n")
exit()
from optparse import OptionParser
from wordpot.logger import *
from werkzeug.routing import BaseConverter
from wordpot.plugins_manager import PluginsManager
import os
# ---------------
# Regex Converter
# ---------------
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
# -------
# Options
# -------
REQUIRED_OPTIONS = {
'HOST': '127.0.0.1',
'PORT': '8088',
'THEME': 'twentyeleven',
    'BLOGTITLE': 'This is my first blog and I am very interested in Trekking and Hiking',
'AUTHORS': ['admin']
}
def parse_options():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option('--host', dest='HOST', help='Host address')
parser.add_option('--port', dest='PORT', help='Port number')
parser.add_option('--title', dest='BLOGTITLE', help='Blog title')
parser.add_option('--theme', dest='THEME', help='Default theme name')
parser.add_option('--plugins', dest='PLUGINS', help='Fake installed plugins')
parser.add_option('--themes', dest='THEMES', help='Fake installed themes')
parser.add_option('--ver', dest='VERSION', help='Wordpress version')
parser.add_option('--server', dest='SERVER', help='Custom "Server" header')
(options, args) = parser.parse_args()
for opt, val in options.__dict__.items():
if val is not None:
if opt in ['PLUGINS', 'THEMES']:
val = [ v.strip() for v in val.split(',') ]
app.config[opt] = val
def check_options():
for k, v in REQUIRED_OPTIONS.items():
if k not in app.config:
LOGGER.error('%s was not set. Falling back to default: %s', k, v)
app.config[k] = v
# -------------------
# Building the Logger
# -------------------
logging_setup()
# ------------
# Building app
# ------------
app = Flask('wordpot')
app.url_map.converters['regex'] = RegexConverter
# Import config from file
conffile = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../wordpot.conf')
LOGGER.info('Loading conf file: %s', conffile)
try:
app.config.from_pyfile(conffile)
except:
LOGGER.error('Can\'t load conf file')
check_options()
if app.config['HPFEEDS_ENABLED']:
import hpfeeds
print('Connecting to hpfeeds broker {}:{}'.format(app.config['HPFEEDS_HOST'], app.config['HPFEEDS_PORT']))
app.config['hpfeeds_client'] = hpfeeds.new(
app.config['HPFEEDS_HOST'],
app.config['HPFEEDS_PORT'],
app.config['HPFEEDS_IDENT'],
app.config['HPFEEDS_SECRET']
)
app.config['hpfeeds_client'].s.settimeout(0.01)
else:
LOGGER.warn('hpfeeds is disabled')
# ------------------------
# Add Custom Server Header
#-------------------------
@app.after_request
def add_server_header(response):
if app.config['SERVER']:
response.headers['Server'] = app.config['SERVER']
return response
# ----------------------------
# Building the plugins manager
# ----------------------------
pm = PluginsManager()
pm.load()
import wordpot.views
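# Usage note (assumption, not part of the original module): a separate runner
# script typically imports this package and starts the honeypot, roughly:
#   from wordpot import app, parse_options
#   parse_options()
#   app.run(host=app.config['HOST'], port=int(app.config['PORT']))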
|
the-stack_106_24045 | """Demo platform that has two fake binary sensors."""
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo binary sensor platform."""
async_add_entities(
[
DemoBinarySensor("binary_1", "Basement Floor Wet", False, "moisture"),
DemoBinarySensor("binary_2", "Movement Backyard", True, "motion"),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoBinarySensor(BinarySensorEntity):
    """Representation of a Demo binary sensor."""
def __init__(self, unique_id, name, state, device_class):
"""Initialize the demo sensor."""
self._unique_id = unique_id
self._name = name
self._state = state
self._sensor_type = device_class
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self.unique_id)
},
"name": self.name,
}
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def should_poll(self):
"""No polling needed for a demo binary sensor."""
return False
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
|
the-stack_106_24046 | #!/usr/bin/env python3
import sys
import os
import re
import json
import merger
import numpy
import matplotlib.pyplot as plt
import datetime
class Object:
pass
class GrowingList( list ):
def __getitem__( self, index ):
if index >= len(self):
self.extend( [0] * ( index + 1 - len( self ) ) )
return list.__getitem__( self, index )
def __setitem__( self, index, value ):
if index >= len( self ):
self.extend( [0] * ( index + 1 - len( self ) ) )
list.__setitem__( self, index, value )
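# Illustrative note (not part of the original script): GrowingList silently
# extends itself with zeros on out-of-range access, so histogram-style counters
# can be bumped without pre-sizing, e.g.
#   depths = GrowingList()
#   depths[5] += 1    # depths is now [0, 0, 0, 0, 0, 1]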
def lineToEvent( line ):
ev = Object()
ev.ts = line[ 0 ]
ev.pid = line[ 1 ]
ev.tid = line[ 2 ]
ev.source = line[ 3 ]
if ev.source == "macro":
ev.tkid = line[ 4 ]
ev.kind = line[ 5 ]
misc = line[ 6 ]
try:
ev.ctx = misc[ "ctx" ]
except:
pass
try:
ev.ctx_ptr = misc[ "ctx_ptr" ]
except:
pass
try:
ev.step = misc[ "step" ]
except:
pass
try:
ev.recurring = misc[ "recurring" ]
except:
pass
try:
ev.name = misc[ "name" ]
except:
pass
elif ev.source == "micro":
ev.tkid = "micro"
ev.kind = line[ 4 ]
misc = line[ 5 ]
try:
ev.props = misc[ "props" ]
except:
pass
try:
ev.num_tasks = misc[ "num_tasks" ]
except:
pass
try:
ev.callback = misc[ "callback" ]
except:
pass
try:
ev.ctx = misc[ "ctx" ]
except:
pass
try:
ev.ctxdesc = misc[ "ctxdesc" ]
except:
pass
try:
ev.count = misc[ "count" ]
except:
pass
try:
ev.name = misc[ "name" ]
except:
pass
elif ev.source == "exec":
ev.kind = line[ 4 ]
misc = line[ 5 ]
try:
ev.ctx = misc[ "ctx" ]
except:
pass
try:
ev.ctxdesc = misc[ "ctxdesc" ]
except:
pass
try:
ev.ctor = misc[ "ctor" ]
except:
pass
try:
ev.has_exn = misc[ "has_exn" ]
except:
pass
try:
ev.throwOnAllowed = misc[ "throwOnAllowed" ]
except:
pass
try:
ev.name = misc[ "name" ]
except:
pass
else:
print( "UNKNOWN TASK SOURCE: %s" % ev.source )
exit()
return ev
def timestamp( trace_entry ):
try:
rv = trace_entry[ "ts" ]
except:
rv = trace_entry[ 0 ]
# print( "%s -- %s -- %s" % ( trace_entry, rv, type( rv ) ) )
return rv
def fancyPlot( name, data ):
if len( data ) < 1:
return
colors = [ plt.cm.viridis( 0.05 ) ]
if len( data ) > 1:
incr = 0.9 / ( len( data ) - 1 )
for i in range( 1, len( data ) ):
colors.append( plt.cm.viridis( 0.05 + ( incr * i ) ) )
# csfont = {'fontname':'Comic Sans MS'}
# hfont = {'fontname':'Helvetica'}
fig = plt.figure( figsize=( 4, 3 ) )
# plt.title( 'title', **csfont )
# plt.xlabel( 'xlabel', **hfont )
# plt.title( name )
plt.xscale( "log" )
host = fig.add_subplot(111)
color_idx = 0
for stuff in data:
ns = stuff[ 0 ]
splitx = stuff[ 1 ] if len( stuff ) > 1 else False
splity = stuff[ 2 ] if len( stuff ) > 2 else False
n = len( ns )
ys = list( range( n ) )
# for i in range( n ):
# ys[ i ] = ys[ i ] / n
plotable = host.twinx() if splitx else host
plotable = plotable.twiny() if splity else plotable
plotable.set_xscale( "log" )
# plotable.plot( ns, ys, color=colors[ color_idx ] )
lines = plotable.plot( ns, ys )
if color_idx == 0:
plt.setp( lines, color="black" )
elif color_idx == 1:
plt.setp( lines, color="black" )
plt.setp( lines, linewidth="2.0" )
plt.setp( lines, linestyle="--" )
plt.setp( lines, dashes=( 4, 2 ) )
else:
plt.setp( lines, color="black" )
plt.setp( lines, linewidth="2.0" )
plt.setp( lines, linestyle=":" )
# plt.setp( lines, dashes=( 3, 3 ) )
color_idx += 1
    if name == "gaps":
x1, x2, y1, y2 = plt.axis()
plt.axis( ( x1, 500000, y1, y2 ) )
    if name == "lengths":
x1, x2, y1, y2 = plt.axis()
plt.axis( ( x1, 1000000, y1, y2 ) )
if False:
plt.show()
else:
plt.savefig( "%s.pdf" % name, bbox_inches='tight' )
def parseBeginsAndEnds( basepath ):
begins_ends = []
for fname in os.listdir( basepath ):
path = os.path.join( basepath, fname )
trace = []
with open( path ) as f:
for line in f:
if "CHARCOAL_BEGIN_RECORDING_TRACE" in line:
try:
j = json.loads( line )
print( j )
begins_ends.append( ( "B", j[ 0 ] ) )
except:
print( "json parse failed!!! %s" % line )
sys.exit()
if "CHARCOAL_END_RECORDING_TRACE" in line:
try:
j = json.loads( line )
print( j )
begins_ends.append( ( "E", j[ 0 ] ) )
except:
print( "json parse failed!!! %s" % line )
sys.exit()
recording_ranges = []
def k( be ):
return be[ 1 ]
begins_ends.sort( key=k )
last_begin = None
for b_or_e in begins_ends:
if last_begin is None:
if b_or_e[ 0 ] is "B":
last_begin = b_or_e[ 1 ]
elif b_or_e[ 0 ] is "E":
print( "Weird end without begin" )
else:
print( "WHOA! %s" % b_or_e[ 0 ] )
exit()
else:
if b_or_e[ 0 ] is "B":
print( "Weird double begin" )
elif b_or_e[ 0 ] is "E":
recording_ranges.append( ( last_begin, b_or_e[ 1 ] ) )
last_begin = None
else:
print( "WHOA! %s" % b_or_e[ 0 ] )
exit()
print( recording_ranges )
return recording_ranges
def printIles( name, items, iles ):
L = len( items )
print( "%s %10d " % ( name, L ), end="" )
if L < 1:
print( "EMPTY" )
return
for p in iles:
print( "%8.0f " % items[ int( L * p ) ], end="" )
print( "" )
def parseProcessInfo( path ):
processes = {}
    r = re.compile( r"\"CHARCOAL_PROCESS_INFO\s+(A|T)\s+(\d+)\s+(\w+)\"" )
with open( path ) as f:
for line in f:
result = r.search( line )
if result is None:
continue
id = int( result.group( 2 ) )
kind = result.group( 3 )
try:
if processes[ id ] != kind:
print( "WRONG PROCESS KIND %s %s" % ( kind, processes[ id ] ) )
exit()
except:
processes[ id ] = kind
print( processes )
render_processes = set()
for key, value in processes.items():
if value == "renderer":
render_processes.add( key )
print( render_processes )
return render_processes
def combineStats( s1, s2 ):
s1.children_per_cont.extend ( s2.children_per_cont )
s1.pchildren_per_edge.extend( s2.pchildren_per_edge )
s1.macro_lengths.extend ( s2.macro_lengths )
s1.recur_lengths.extend ( s2.recur_lengths )
s1.not_recur_lengths.extend ( s2.not_recur_lengths )
s1.micro_lengths.extend ( s2.micro_lengths )
s1.before_counts.extend ( s2.before_counts )
s1.after_counts.extend ( s2.after_counts )
s1.chain2_lengths.extend ( s2.chain2_lengths )
s1.chain3_lengths.extend ( s2.chain3_lengths )
s1.chain4_lengths.extend ( s2.chain4_lengths )
s1.spans.extend ( s2.spans )
for key, value in s2.api_kind.items():
try:
s1.api_kind[ key ] += value
except:
s1.api_kind[ key ] = value
for key, value in s2.gaps.items():
try:
s1.gaps[ key ].extend( value )
except:
s1.gaps[ key ] = value
def showStats( s ):
all_lengths = s.macro_lengths[:]
all_lengths.extend( s.micro_lengths )
allnr_lengths = s.not_recur_lengths[:]
allnr_lengths.extend( s.micro_lengths )
s.macro_lengths.sort()
s.recur_lengths.sort()
s.not_recur_lengths.sort()
s.micro_lengths.sort()
all_lengths.sort()
allnr_lengths.sort()
s.children_per_cont.sort()
s.pchildren_per_edge.sort()
s.before_counts.sort()
s.after_counts.sort()
s.chain2_lengths.sort()
s.chain3_lengths.sort()
s.chain4_lengths.sort()
s.spans.sort()
shorties1m = {}
shorties100u = {}
def incrShorty1m( sched_name ):
try:
shorties1m[ sched_name ] += 1
except:
shorties1m[ sched_name ] = 1
def incrShorty100u( sched_name ):
try:
shorties100u[ sched_name ] += 1
except:
shorties100u[ sched_name ] = 1
aggregate_gaps = []
for sched_name, value in s.gaps.items():
s.gaps[ sched_name ].sort()
for gap in value:
if gap > 1000:
break
incrShorty1m( sched_name )
if gap > 100:
continue
incrShorty100u( sched_name )
# print( "GAPS \"%s\" %d" % ( sched_name, len( value ) ) )
# if sched_name not in [ "interrupted", "uninterrupted", "RECURRING - RECURRING", "RECURRING - SendRequest" ]:
# aggregate_gaps.extend( value )
# shorts = 0
# for x in value:
# if x < 100:
# shorts += 1
# print( "BLROPS: %s %s" % ( sched_name, shorts ) )
aggregate_gaps.sort()
not_recur_gaps = []
for sched_name, value in s.gaps.items():
if sched_name == "interrupted" or sched_name == "uninterrupted" or sched_name.startswith( "RECURRING" ):
continue
not_recur_gaps.extend( value )
not_recur_gaps.sort()
printIles( "AL", s.macro_lengths, [ 0.1, 0.5, 0.9, 0.95, 0.99 ] )
printIles( "IL", s.micro_lengths, [ 0.1, 0.5, 0.9, 0.95, 0.99 ] )
printIles( "GA", s.gaps[ "uninterrupted" ], [ 0.01, 0.05, 0.1, 0.5, 0.9 ] )
printIles( "BC", s.before_counts, [ 0.5, 0.9, 0.95, 0.99, 0.999 ] )
printIles( "CC", s.after_counts, [ 0.5, 0.9, 0.95, 0.99, 0.999 ] )
printIles( "C2", s.chain2_lengths,[ 0.5, 0.9, 0.95, 0.99, 0.999 ] )
printIles( "C3", s.chain3_lengths,[ 0.5, 0.9, 0.95, 0.99, 0.999 ] )
printIles( "C4", s.chain4_lengths,[ 0.5, 0.9, 0.95, 0.99, 0.999 ] )
print( "SHORTIES 1 millisecond %s" % shorties1m )
print( "SHORTIES 100 microseconds %s" % shorties100u )
# print( s.api_kind )
# fancyPlot( "lengths", [ [ s.recur_lengths ], [ allnr_lengths ] ] )
short_gaps = list( filter( lambda g: g < 1000, aggregate_gaps ) )
short_spans = list( filter( lambda s: s < 1000, s.spans ) )
fancyPlot( "gap-span", [ [ short_gaps ], [ short_spans ] ] )
# fancyPlot( "children", [ [ s.children_per_cont ], [ s.pchildren_per_edge ] ] )
# fancyPlot( "gaps", [ [ s.gaps[ "uninterrupted" ] ], [ s.gaps[ "interrupted" ] ], [ not_recur_gaps ] ] )
# fancyPlot( "micros", [ [ s.micro_lengths ], [ s.after_counts, False, True ] ] )
# fancyPlot( "gaps", [ [ s.gaps[ "uninterrupted" ] ], [ s.gaps[ "interrupted" ] ], [ aggregate_gaps ] ] )
# fancyPlot( "chains", [ [ s.chain2_lengths ], [ s.chain3_lengths ], [ s.chain4_lengths ] ] )
def stacker( trace ):
all_continuations = []
most_recent_task = None
global_continuation = Object()
global_continuation.tkid = None
global_continuation.events = []
continuation_stack = [ global_continuation ]
max_depth = 1
call_depths = GrowingList()
micro_depths = GrowingList()
macro_lengths = []
recur_lengths = []
not_recur_lengths = []
micro_lengths = []
before_counts = []
after_counts = []
gaps ={ "interrupted":[],
"uninterrupted":[]
}
chain2_lengths = []
chain3_lengths = []
chain4_lengths = []
pc_good = 0
missing_parent = 0
missing_end = 0
parent_of = {}
parent_of[ "micro" ] = None
api_kind = {}
def incrApiKind( kind ):
try:
api_kind[ kind ] += 1
except:
api_kind[ kind ] = 1
def printStack():
print( "STACK ", end=" " )
for x in continuation_stack:
print( x[ "task" ], end=" - " )
print( "" )
line_count = 0
for line in trace:
line_count += 1
max_depth = max( max_depth, len( continuation_stack ) )
ev = lineToEvent( line )
curr_cont = continuation_stack[ -1 ]
curr_cont.events.append( ev )
if ev.source == "macro":
if ev.kind == "scheduled":
incrApiKind( ev.name )
call_depths[ len( continuation_stack ) ] += 1
if curr_cont != global_continuation:
parent_of[ ev.tkid ] = ( curr_cont, ev )
elif ev.kind == "ctor":
next_cont = Object()
next_cont.begin = ev
next_cont.events = []
next_cont.children = []
all_continuations.append( next_cont )
curr_cont.events.append( next_cont )
continuation_stack.append( next_cont )
if ev.tkid in parent_of:
( parent_cont, sched_ev ) = parent_of[ ev.tkid ]
parent_cont.children.append( next_cont )
try:
gap = ev.ts - parent_cont.end.ts
if most_recent_task != parent_cont.begin.tkid:
gaps[ "interrupted" ].append( gap )
else:
gaps[ "uninterrupted" ].append( gap )
pc_good += 1
try:
gaps[ sched_ev.name ].append( gap )
except:
gaps[ sched_ev.name ] = [ gap ]
if gap < 100 and ( "RECURRING" not in sched_ev.name ):
print( "!!! %30s %3d" % ( sched_ev.name, gap ) )
except:
missing_end += 1
else:
missing_parent += 1
if len( continuation_stack ) > 3:
pass # printStack()
elif ev.kind == "dtor":
most_recent_task = ev.tkid
blah = continuation_stack.pop()
# print( "BLAH %s" % vars( blah ) )
if ev.tkid != curr_cont.begin.tkid:
print( "TASK MISMATCH '%s' '%s'" % ( ev.tkid, curr_cont.begin.tkid ) )
exit()
macro_lengths.append( ev.ts - curr_cont.begin.ts )
curr_cont.end = ev
if ev.tkid in parent_of:
( parent_cont, sched_ev ) = parent_of[ ev.tkid ]
if sched_ev.name.startswith( "RECURRING" ):
recur_lengths.append( ev.ts - curr_cont.begin.ts )
else:
not_recur_lengths.append( ev.ts - curr_cont.begin.ts )
curr_cont.chain2 = ev.ts - parent_cont.begin.ts
chain2_lengths.append( curr_cont.chain2 )
try:
diff = ev.ts - parent_cont.end.ts
curr_cont.chain3 = parent_cont.chain2 + diff
chain3_lengths.append( curr_cont.chain3 )
try:
curr_cont.chain4 = parent_cont.chain3 + diff
chain4_lengths.append( curr_cont.chain4 )
except Exception as ex:
pass
except Exception as ex:
pass
else:
not_recur_lengths.append( ev.ts - curr_cont.begin.ts )
if ev.recurring:
ev.name = "RECURRING"
try:
( parent_cont, sched_ev ) = parent_of[ ev.tkid ]
if sched_ev.name.startswith( "RECURRING" ):
ev.name = sched_ev.name
else:
ev.name = "RECURRING - %s" % sched_ev.name
except:
pass
parent_of[ ev.tkid ] = ( curr_cont, ev )
elif ev.kind == "canceled" or ev.kind == "all_canceled":
pass
else:
print( "WEIRD MACRO TASK KIND %s" % ev.kind )
exit()
elif ev.source == "micro":
if ev.kind == "enq":
call_depths[ len( continuation_stack ) ] += 1
                if curr_cont != global_continuation and parent_of[ "micro" ] is None:
ev.name = "MICRO"
parent_of[ "micro" ] = ( curr_cont, ev )
elif ev.kind == "before_loop":
before_counts.append( ev.num_tasks )
next_cont = Object()
next_cont.begin = ev
next_cont.events = []
next_cont.children = []
all_continuations.append( next_cont )
curr_cont.events.append( next_cont )
continuation_stack.append( next_cont )
if parent_of[ "micro" ] != None:
( parent_cont, sched_ev ) = parent_of[ "micro" ]
parent_cont.children.append( next_cont )
try:
gap = ev.ts - parent_cont.end.ts
if most_recent_task != parent_cont.begin.tkid:
gaps[ "interrupted" ].append( gap )
else:
gaps[ "uninterrupted" ].append( gap )
pc_good += 1
try:
gaps[ "micro" ].append( gap )
except:
gaps[ "micro" ] = [ gap ]
except:
missing_end += 1
else:
missing_parent += 1
if len( continuation_stack ) > 3:
pass # printStack()
elif ev.kind.startswith( "start" ):
if len( continuation_stack ) > 3:
pass #printStack()
micro_depths[ ev.num_tasks ] += 1
elif ev.kind.startswith( "done" ):
pass
elif ev.kind == "after_loop":
continuation_stack.pop()
if curr_cont.begin.tkid != "micro":
print( "MICROTASK MISMATCH %s\n%s" % ( vars( curr_cont.begin ), vars( ev ) ) )
exit()
curr_cont.end = ev
micro_lengths.append( ev.ts - curr_cont.begin.ts )
after_counts.append( ev.count )
try:
( parent_cont, sched_ev ) = parent_of[ "micro" ]
curr_cont.chain2 = ev.ts - parent_cont.begin.ts
chain2_lengths.append( curr_cont.chain2 )
try:
                        diff = ev.ts - parent_cont.end.ts
curr_cont.chain3 = parent_cont.chain2 + diff
chain3_lengths.append( curr_cont.chain3 )
try:
curr_cont.chain4 = parent_cont.chain3 + diff
chain4_lengths.append( curr_cont.chain4 )
except:
pass
except:
pass
except:
pass
parent_of[ "micro" ] = None
most_recent_task = ev.tkid
else:
print( "WEIRD MICRO TASK KIND %s" % ev.kind )
exit()
elif ev.source == "exec":
curr_cont.events.append( ev )
else:
print( "WEIRD SOURCE %s" %source )
exit()
spans = []
children_per_cont = []
pchildren_per_edge = []
for parent in all_continuations:
n = len( parent.children )
children_per_cont.append( n )
for i in range( n ):
pchildren_per_edge.append( n )
for child in parent.children:
try:
end = child.end.ts
except:
print( "MISSING END" );
continue
try:
begin = parent.begin.ts
except:
print( "MISSING BEGIN" )
continue
span = end - begin
spans.append( span )
print( "w00t? %d %d" % ( max_depth, len( global_continuation.events ) ) )
print( "CALL %s" % call_depths )
print( "MICRO %s" % micro_depths )
print( "Good: %d - Missing Parent: %d - Missing End: %d" %
( pc_good, missing_parent, missing_end ) )
print( "." )
stats = Object()
stats.children_per_cont = children_per_cont
stats.pchildren_per_edge = pchildren_per_edge
stats.macro_lengths = macro_lengths
stats.recur_lengths = recur_lengths
stats.not_recur_lengths = not_recur_lengths
stats.micro_lengths = micro_lengths
stats.before_counts = before_counts
stats.after_counts = after_counts
stats.gaps = gaps
stats.spans = spans
stats.chain2_lengths = chain2_lengths
stats.chain3_lengths = chain3_lengths
stats.chain4_lengths = chain4_lengths
stats.api_kind = api_kind
return stats
def splitByThread( trace ):
traces = {}
def append( tid, line ):
try:
traces[ tid ].append( line )
except:
trace = [ line ]
traces[ tid ] = trace
for line in trace:
append( line[ 2 ], line )
return traces
def analyze( basepath, ranges, render_processes, stats ):
print( "======================== analyze %s ======================== %s" % ( basepath, render_processes ) )
id_path_map = {}
def add( id, fname ):
path = os.path.join( basepath, fname )
try:
id_path_map[ id ].add( path )
except:
paths = { path }
id_path_map[ id ] = paths
r = re.compile( "p(c|i)_(\d+)\." )
for fname in os.listdir( basepath ):
result = r.search( fname )
print( "File: %s %s" % ( fname, result ) )
if result is None:
continue
id = int( result.group( 2 ) )
add( id, fname )
for id in render_processes:
ts1 = datetime.datetime.now()
print( "Starting process: %s" % id )
try:
paths = id_path_map[ id ]
except:
print( "No files for pid %d" % id )
continue
merged = merger.mergeTraces( merger.parseTraces( paths ) )
print( "lines: %d" % ( len( merged ) ) )
for tid, trace in splitByThread( merged ).items():
print( "THREAD %s" % tid )
s = stacker( trace )
combineStats( stats, s )
# tracer( trace )
ts2 = datetime.datetime.now()
print( "[TS] Done with process %s: %s" % ( id, ( ts2 - ts1 ) ) )
def analyzeDir( base_dir, stats ):
print( "======================== analyzeDir %s ========================" % base_dir )
ts1 = datetime.datetime.now()
recording_ranges = parseBeginsAndEnds( os.path.join( base_dir, "Traces" ) )
recording_ranges = [ ( 0, 9999999999999 ) ] # HACK
ts2 = datetime.datetime.now()
print( "[TS] begin-end ranges: %s" % ( ts2 - ts1 ) )
render_processes = parseProcessInfo( os.path.join( base_dir, "stderr.txt" ) )
ts3 = datetime.datetime.now()
print( "[TS] process info: %s" % ( ts3 - ts2 ) )
analyze( os.path.join( base_dir, "Traces" ), recording_ranges, render_processes, stats )
ts4 = datetime.datetime.now()
print( "[TS] kit and caboodle: %s" % ( ts4 - ts3 ) )
def main():
traces_dir = sys.argv[ 1 ] if len( sys.argv ) > 1 else "./Traces"
stats = Object()
stats.children_per_cont = []
stats.pchildren_per_edge = []
stats.macro_lengths = []
stats.recur_lengths = []
stats.not_recur_lengths = []
stats.micro_lengths = []
stats.before_counts = []
stats.after_counts = []
stats.gaps = {}
stats.spans = []
stats.chain2_lengths = []
stats.chain3_lengths = []
stats.chain4_lengths = []
stats.api_kind = {}
for fname in os.listdir( traces_dir ):
if fname == ".DS_Store":
continue
path = os.path.join( traces_dir, fname )
analyzeDir( path, stats )
showStats( stats )
if __name__ == "__main__":
# execute only if run as a script
main()
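# Usage sketch (the script name and directory layout are assumptions): main()
# above walks a top-level traces directory whose children each contain a
# "Traces" folder and a "stderr.txt", e.g.
#
#   python analyze_traces.py ./Traces
#
# With no argument it falls back to "./Traces", as the sys.argv check shows.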
# par1 = host.twinx()
# par2 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
# par1.set_ylim(0, 4)
# par2.set_ylim(1, 65)
# host.set_xlabel("Distance")
# host.set_ylabel("Density")
# par1.set_ylabel("Temperature")
# par2.set_ylabel("Velocity")
# # color1 = plt.cm.viridis(0)
# # color2 = plt.cm.viridis(0.5)
# # color3 = plt.cm.viridis(.9)
# p1, = host.plot([0, 1, 2], [0, 1, 2], color=color1,label="Density")
# p2, = par1.plot([0, 1, 2], [0, 3, 2], color=color2, label="Temperature")
# p3, = par2.plot([0, 1, 2], [50, 30, 15], color=color3, label="Velocity")
# lns = [p1, p2, p3]
# host.legend(handles=lns, loc='best')
# # right, left, top, bottom
# par2.spines['right'].set_position(('outward', 60))
# # no x-ticks
# # par2.xaxis.set_ticks([])
# # Sometimes handy, same for xaxis
# #par2.yaxis.set_ticks_position('right')
# host.yaxis.label.set_color(p1.get_color())
# par1.yaxis.label.set_color(p2.get_color())
# par2.yaxis.label.set_color(p3.get_color())
# plt.savefig("pyplot_multiple_y-axis.png", bbox_inches='tight')
|
the-stack_106_24047 | import logging
import argparse
import configparser
import os
import torch
import numpy as np
import gym
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.robot import Robot
from crowd_sim.envs.policy.orca import ORCA
import crowd_sim.envs.utils.state as OB
import crowd_sim.envs.utils.action as AC
def main():
parser = argparse.ArgumentParser('Parse configuration file')
parser.add_argument('--env_config', type=str, default='configs/env.config')
parser.add_argument('--policy_config', type=str, default='configs/policy.config')
parser.add_argument('--policy', type=str, default='orca')
parser.add_argument('--model_dir', type=str, default=None)
parser.add_argument('--il', default=False, action='store_true')
parser.add_argument('--gpu', default=False, action='store_true')
parser.add_argument('--visualize', default=False, action='store_true')
parser.add_argument('--phase', type=str, default='test')
parser.add_argument('--test_case', type=int, default=None)
parser.add_argument('--square', default=False, action='store_true')
parser.add_argument('--circle', default=False, action='store_true')
parser.add_argument('--video_file', type=str, default=None)
parser.add_argument('--traj', default=False, action='store_true')
args = parser.parse_args()
if args.model_dir is not None:
env_config_file = os.path.join(args.model_dir, os.path.basename(args.env_config))
policy_config_file = os.path.join(args.model_dir, os.path.basename(args.policy_config))
if args.il:
model_weights = os.path.join(args.model_dir, 'il_model.pth')
else:
if os.path.exists(os.path.join(args.model_dir, 'resumed_rl_model.pth')):
model_weights = os.path.join(args.model_dir, 'resumed_rl_model.pth')
else:
model_weights = os.path.join(args.model_dir, 'rl_model.pth')
else:
env_config_file = args.env_config
        policy_config_file = args.policy_config
# configure logging and device
logging.basicConfig(level=logging.INFO, format='%(asctime)s, %(levelname)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
logging.info('Using device: %s', device)
# configure policy
policy = policy_factory[args.policy]()
policy_config = configparser.RawConfigParser()
policy_config.read(policy_config_file)
policy.configure(policy_config)
if policy.trainable:
if args.model_dir is None:
parser.error('Trainable policy must be specified with a model weights directory')
policy.get_model().load_state_dict(torch.load(model_weights))
# configure environment
env_config = configparser.RawConfigParser()
env_config.read(env_config_file)
env = gym.make('CrowdSim-v0')
env.configure(env_config)
print("ENV", env)
if args.square:
env.test_sim = 'square_crossing'
if args.circle:
env.test_sim = 'circle_crossing'
robot = Robot(env_config, 'robot')
robot.set_policy(policy)
env.set_robot(robot)
explorer = Explorer(env, robot, device, gamma=0.9)
policy.set_phase(args.phase)
policy.set_device(device)
# set safety space for ORCA in non-cooperative simulation
if isinstance(robot.policy, ORCA):
if robot.visible:
robot.policy.safety_space = 0
else:
# because invisible case breaks the reciprocal assumption
# adding some safety space improves ORCA performance. Tune this value based on your need.
robot.policy.safety_space = 0
logging.info('ORCA agent buffer: %f', robot.policy.safety_space)
policy.set_env(env)
robot.print_info()
if args.visualize:
ob = env.reset(args.phase, args.test_case)
done = False
print("typ", type(ob), ob[0])
last_pos = np.array(robot.get_position())
while not done:
prk = [OB.ObservableState(_, _, 0, 0, 0) for _ in range(5) ]
action = robot.act(prk)
robot.tt()
# print("the_fuck", the_fuck)
print("CEHCK: ", type(prk), prk[0])
print("act", type(action), ', ', action)
# action = AC.ActionXY(0.1 ,0)
ob, _, done, info = env.step(action)
current_pos = np.array(robot.get_position())
logging.debug('Speed: %.2f', np.linalg.norm(current_pos - last_pos) / robot.time_step)
last_pos = current_pos
# print("deb : ", info)
if args.traj:
env.render('traj', args.video_file)
else:
env.render('video', args.video_file)
logging.info('It takes %.2f seconds to finish. Final status is %s', env.global_time, info)
if robot.visible and info == 'reach goal':
human_times = env.get_human_times()
logging.info('Average time for humans to reach goal: %.2f', sum(human_times) / len(human_times))
else:
explorer.run_k_episodes(env.case_size[args.phase], args.phase, print_failure=True)
if __name__ == '__main__':
main()
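# Example invocation (hypothetical; only flags defined in the parser above are
# used), visualizing a single ORCA test episode:
#
#   python test.py --policy orca --phase test --test_case 0 --visualize
#
# A learned policy would instead pass --model_dir pointing at a folder holding
# rl_model.pth together with the env/policy config files saved at train time.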
|
the-stack_106_24048 | import argparse
import runpy
import sys
from .logger import LogLevel
from .reloader import start_reloader
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", dest="module", required=True)
parser.add_argument("-w", dest="watch", action="append")
parser.add_argument("-v", dest="verbose", action='store_true')
parser.add_argument("-q", dest="quiet", action='store_true')
args, unknown_args = parser.parse_known_args()
if args.quiet:
level = LogLevel.ERROR
elif args.verbose:
level = LogLevel.DEBUG
else:
level = LogLevel.INFO
reloader = start_reloader(
"hupper.cli.main",
verbose=level,
)
sys.argv[1:] = unknown_args
sys.path.insert(0, "")
if args.watch:
reloader.watch_files(args.watch)
return runpy.run_module(
args.module,
alter_sys=True,
run_name="__main__")
|
the-stack_106_24049 | # Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import unittest
from openfold.model.evoformer import (
MSATransition,
EvoformerStack,
ExtraMSAStack,
)
from openfold.utils.tensor_utils import tree_map
import tests.compare_utils as compare_utils
from tests.config import consts
if compare_utils.alphafold_is_installed():
alphafold = compare_utils.import_alphafold()
import jax
import haiku as hk
class TestEvoformerStack(unittest.TestCase):
def test_shape(self):
batch_size = consts.batch_size
n_seq = consts.n_seq
n_res = consts.n_res
c_m = consts.c_m
c_z = consts.c_z
c_hidden_msa_att = 12
c_hidden_opm = 17
c_hidden_mul = 19
c_hidden_pair_att = 14
c_s = consts.c_s
no_heads_msa = 3
no_heads_pair = 7
no_blocks = 2
transition_n = 2
msa_dropout = 0.15
pair_stack_dropout = 0.25
inf = 1e9
eps = 1e-10
es = EvoformerStack(
c_m,
c_z,
c_hidden_msa_att,
c_hidden_opm,
c_hidden_mul,
c_hidden_pair_att,
c_s,
no_heads_msa,
no_heads_pair,
no_blocks,
transition_n,
msa_dropout,
pair_stack_dropout,
blocks_per_ckpt=None,
inf=inf,
eps=eps,
).eval()
m = torch.rand((batch_size, n_seq, n_res, c_m))
z = torch.rand((batch_size, n_res, n_res, c_z))
msa_mask = torch.randint(0, 2, size=(batch_size, n_seq, n_res))
pair_mask = torch.randint(0, 2, size=(batch_size, n_res, n_res))
shape_m_before = m.shape
shape_z_before = z.shape
m, z, s = es(
m, z, chunk_size=4, msa_mask=msa_mask, pair_mask=pair_mask
)
self.assertTrue(m.shape == shape_m_before)
self.assertTrue(z.shape == shape_z_before)
self.assertTrue(s.shape == (batch_size, n_res, c_s))
@compare_utils.skip_unless_alphafold_installed()
def test_compare(self):
def run_ei(activations, masks):
config = compare_utils.get_alphafold_config()
c_e = config.model.embeddings_and_evoformer.evoformer
ei = alphafold.model.modules.EvoformerIteration(
c_e, config.model.global_config, is_extra_msa=False
)
return ei(activations, masks, is_training=False)
f = hk.transform(run_ei)
n_res = consts.n_res
n_seq = consts.n_seq
activations = {
"msa": np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32),
"pair": np.random.rand(n_res, n_res, consts.c_z).astype(np.float32),
}
masks = {
"msa": np.random.randint(0, 2, (n_seq, n_res)).astype(np.float32),
"pair": np.random.randint(0, 2, (n_res, n_res)).astype(np.float32),
}
params = compare_utils.fetch_alphafold_module_weights(
"alphafold/alphafold_iteration/evoformer/evoformer_iteration"
)
params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)
key = jax.random.PRNGKey(42)
out_gt = f.apply(params, key, activations, masks)
jax.tree_map(lambda x: x.block_until_ready(), out_gt)
out_gt_msa = torch.as_tensor(np.array(out_gt["msa"]))
out_gt_pair = torch.as_tensor(np.array(out_gt["pair"]))
model = compare_utils.get_global_pretrained_openfold()
out_repro_msa, out_repro_pair = model.evoformer.blocks[0](
torch.as_tensor(activations["msa"]).cuda(),
torch.as_tensor(activations["pair"]).cuda(),
torch.as_tensor(masks["msa"]).cuda(),
torch.as_tensor(masks["pair"]).cuda(),
chunk_size=4,
_mask_trans=False,
)
out_repro_msa = out_repro_msa.cpu()
out_repro_pair = out_repro_pair.cpu()
assert torch.max(torch.abs(out_repro_msa - out_gt_msa) < consts.eps)
assert torch.max(torch.abs(out_repro_pair - out_gt_pair) < consts.eps)
class TestExtraMSAStack(unittest.TestCase):
def test_shape(self):
batch_size = 2
s_t = 23
n_res = 5
c_m = 7
c_z = 11
c_hidden_msa_att = 12
c_hidden_opm = 17
c_hidden_mul = 19
c_hidden_tri_att = 16
no_heads_msa = 3
no_heads_pair = 8
no_blocks = 2
transition_n = 5
msa_dropout = 0.15
pair_stack_dropout = 0.25
inf = 1e9
eps = 1e-10
es = ExtraMSAStack(
c_m,
c_z,
c_hidden_msa_att,
c_hidden_opm,
c_hidden_mul,
c_hidden_tri_att,
no_heads_msa,
no_heads_pair,
no_blocks,
transition_n,
msa_dropout,
pair_stack_dropout,
blocks_per_ckpt=None,
inf=inf,
eps=eps,
).eval()
m = torch.rand((batch_size, s_t, n_res, c_m))
z = torch.rand((batch_size, n_res, n_res, c_z))
msa_mask = torch.randint(
0,
2,
size=(
batch_size,
s_t,
n_res,
),
)
pair_mask = torch.randint(
0,
2,
size=(
batch_size,
n_res,
n_res,
),
)
shape_z_before = z.shape
z = es(m, z, chunk_size=4, msa_mask=msa_mask, pair_mask=pair_mask)
self.assertTrue(z.shape == shape_z_before)
class TestMSATransition(unittest.TestCase):
def test_shape(self):
batch_size = 2
s_t = 3
n_r = 5
c_m = 7
n = 11
mt = MSATransition(c_m, n)
m = torch.rand((batch_size, s_t, n_r, c_m))
shape_before = m.shape
m = mt(m, chunk_size=4)
shape_after = m.shape
self.assertTrue(shape_before == shape_after)
@compare_utils.skip_unless_alphafold_installed()
def test_compare(self):
def run_msa_transition(msa_act, msa_mask):
config = compare_utils.get_alphafold_config()
c_e = config.model.embeddings_and_evoformer.evoformer
msa_trans = alphafold.model.modules.Transition(
c_e.msa_transition,
config.model.global_config,
name="msa_transition",
)
act = msa_trans(act=msa_act, mask=msa_mask)
return act
f = hk.transform(run_msa_transition)
n_res = consts.n_res
n_seq = consts.n_seq
msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
msa_mask = np.ones((n_seq, n_res)).astype(
np.float32
) # no mask here either
        # Fetch pretrained parameters (but only from one block)
params = compare_utils.fetch_alphafold_module_weights(
"alphafold/alphafold_iteration/evoformer/evoformer_iteration/"
+ "msa_transition"
)
params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)
out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
out_gt = torch.as_tensor(np.array(out_gt))
model = compare_utils.get_global_pretrained_openfold()
out_repro = (
model.evoformer.blocks[0]
.msa_transition(
torch.as_tensor(msa_act, dtype=torch.float32).cuda(),
mask=torch.as_tensor(msa_mask, dtype=torch.float32).cuda(),
)
.cpu()
)
self.assertTrue(torch.max(torch.abs(out_gt - out_repro) < consts.eps))
if __name__ == "__main__":
unittest.main()
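# The comparison tests above are skipped automatically when AlphaFold (and its
# jax/haiku dependencies) is not importable. To run only this file (the module
# path is an assumption about the repository layout):
#
#   python -m unittest tests.test_evoformer -v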
|
the-stack_106_24050 | import lambentlight.client as client
async def show_info():
"""
Shows the basic information of the LambentLight instance.
"""
# Request the basic info and print it
info = await client.get("/")
print("{0} v{1} running on {2}".format(info["prog"], info["version"], client.host))
|
the-stack_106_24051 | """
Chronic is a module designed to do simple profiling of your python code while
giving you full control of the granularity of measurement. It maintains the
hierarchy of the call tree, but only at the levels you care about. The timing
results can easily be captured as JSON and logged for analysis in postgres or
mongodb.
You may use the @time decorator or the Timer context manager:
>>> @time
>>> def func():
>>> with Timer('bar'):
>>> pass
>>> func()
>>> timings
{
'func': {
'total_elapsed': 38.5,
'count': 1,
'average_elapsed': 38.5
'timings': {
'bar': {
'total_elapsed': 20.52,
'count': 1
'average_elapsed': 20.52
}
}
}
}
"""
import sys
import time as systime
import threading
from functools import partial, wraps
from signals import Signal
from proxy import Proxy
_local = threading.local()
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
_clock = systime.clock
else:
# On most other platforms the best timer is time.time()
_clock = systime.time
post_timing = Signal(name='post timing')
class Timer(object):
'''A context manager for timing blocks of code.
Use chronic.timings to obtain the results.
Arguments:
name -- A unique name for the timing.
clock -- A function that returns the current time. Mostly for testing,
default is the system clock.
'''
def __init__(self, name, clock=None):
self.name = name
self._clock = clock if clock is not None else _clock
def _push(self):
if not hasattr(_local, 'stopwatch'):
_local.stopwatch = {'current': {}, 'stack': []}
current = _local.stopwatch['current']
_local.stopwatch['stack'].append((self.name, current))
current['timings'] = current.get('timings', {})
current['timings'][self.name] = current['timings'].get(self.name, {})
new = current['timings'][self.name]
_local.stopwatch['current'] = new
def _pop(self):
_, last = _local.stopwatch['stack'].pop()
_local.stopwatch['current'] = last
def __enter__(self):
self._push()
self.start = self._clock()
def __exit__(self, type, val, tb):
elapsed = self._clock() - self.start
current = _local.stopwatch['current']
current['total_elapsed'] = elapsed + current.get('total_elapsed', 0)
current['count'] = 1 + current.get('count', 0)
current['average_elapsed'] = (current['total_elapsed'] /
current['count'])
current_stack = stack
self._pop()
post_timing.emit(elapsed, current, current_stack)
def time(func=None, name=None, clock=None):
'''A decorator for timing function calls.
Use chronic.timings to obtain the results.
Arguments:
name -- A unique name for the timing. Defaults to the function name.
clock -- A function that returns the current time. Mostly for testing,
default is the system clock.
'''
# When this decorator is used with optional parameters:
#
# @time(name='timed_thing')
# def func():
# pass
#
# The result of this decorator should be a function which will receive
# the function to wrap as an argument, and return the wrapped function.
if func is None:
return partial(time, name=name, clock=clock)
@wraps(func)
def _inner(*args, **kws):
with Timer(name or func.__name__, clock=clock):
result = func(*args, **kws)
return result
return _inner
def _get_timings():
if hasattr(_local, 'stopwatch'):
return _local.stopwatch['current'].get('timings', {})
def _get_stack():
if hasattr(_local, 'stopwatch'):
return tuple(name for name, _ in _local.stopwatch['stack'])
timings = Proxy(_get_timings, doc='''This variable always holds all completed
timing information for the current scope. Information is available as a dict
with a key for each timing (the name of the timer). The value of each timing
is a dict with three keys:
* total_elapsed: the elapsed execution time of the code (including all
subtimings) for all runs of this block. The unit is seconds by default.
If you pass in your own clock function, the unit is whatever the unit of
the clock.
* count: the number of times the timed block was run.
* average_elapsed: the average elapsed time for each run of this block.
* timings: a dict of all subtimings that were completed while inside this
block
''')
stack = Proxy(_get_stack)
def clear():
_local.stopwatch = {'current': {}, 'stack': []}
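# Minimal usage sketch for the context-manager form (block names are
# illustrative):
#
#   with Timer('load'):
#       with Timer('parse'):
#           pass
#
# afterwards timings['load']['timings']['parse'] holds count, total_elapsed and
# average_elapsed for the nested block, mirroring the module docstring above.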
|
the-stack_106_24053 | import torch.utils.data as data
import torch
class DataIterator(object):
def __init__(self, dataloader):
self.dataloader = dataloader
self.iterator = enumerate(self.dataloader)
def __next__(self):
try:
_, data = next(self.iterator)
except Exception:
self.iterator = enumerate(self.dataloader)
_, data = next(self.iterator)
return data[0], data[1]
class InfiniteDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize an iterator over the dataset.
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
# Dataset exhausted, use a new fresh iterator.
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch
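# Usage sketch (the dataset below is a stand-in): both helpers hide the end of
# the underlying loader so training loops can call next() indefinitely.
#
#   ds = torch.utils.data.TensorDataset(torch.zeros(10, 3), torch.zeros(10))
#   loader = InfiniteDataLoader(ds, batch_size=4, shuffle=True)
#   batch = next(loader)   # keeps cycling past the end of the data
#
#   it = DataIterator(torch.utils.data.DataLoader(ds, batch_size=4))
#   x, y = next(it)        # DataIterator assumes each batch is a (data, label) pair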
|
the-stack_106_24054 | from django.shortcuts import render
#from django.http import HttpResponse
#from django.http import Http404
#from lxml import etree
import imports
from eulexistdb import db
# load the 2014 and 2015 cve files on server launch
#cvestub = "static/data/nist.gov/nvd/cve/nvdcve-2.0-"
#tree2014 = etree.parse(cvestub+"2014"+".xml")
#tree2015 = etree.parse(cvestub+"2015"+".xml")
class connExistDB:
def __init__(self):
self.db = db.ExistDB()
def get_data(self, query):
result = list()
qresult = self.db.executeQuery(query)
hits = self.db.getHits(qresult)
for i in range(hits):
result.append(str(self.db.retrieve(qresult, i)))
return result
def cve_index(request, cveyear):
    if cveyear is None: return render(request, 'cvrf_index.html')
qrystr='''xquery version "3.0";
declare namespace vuln = "http://scap.nist.gov/schema/vulnerability/0.4";
declare namespace nvd = "http://scap.nist.gov/schema/feed/vulnerability/2.0";
let $year := '''+cveyear+'''
let $thisdoc := concat("/db/cyberxml/data/cve/nist.gov/nvdcve-2.0-",$year,".xml")
for $v in doc($thisdoc)/nvd:nvd/nvd:entry
let $last := substring($v/vuln:last-modified-datetime/text(),1,10)
let $first := substring($v/vuln:published-datetime/text(),1,10)
let $id := $v/vuln:cve-id/text()
let $name := $v/vuln:summary/text()
return <tr><td><a href="/cve/nist/xml/{$id}">{$id}</a></td><td>{$name}</td><td>{$first}</td><td>{$last}</td></tr>
'''
a = connExistDB()
idx =a.get_data(qrystr)
return render(request, 'cve_catalog.html', {'idx':idx, 'year':cveyear, 'qstr':qrystr})
def cvexml(request, cvenum):
    cveyear = cvenum.split('-')[1]
    if cveyear is None: return render(request, 'cvrf_index.html')
qrystr='''xquery version "3.0";
declare namespace nvd = "http://scap.nist.gov/schema/feed/vulnerability/2.0";
declare namespace cvss="http://scap.nist.gov/schema/cvss-v2/0.2";
declare namespace cpe-lang="http://cpe.mitre.org/language/2.0" ;
declare namespace vuln="http://scap.nist.gov/schema/vulnerability/0.4";
declare namespace patch="http://scap.nist.gov/schema/patch/0.1" ;
declare namespace scap-core="http://scap.nist.gov/schema/scap-core/0.1";
let $year := "'''+cveyear+'''"
let $thisdoc := concat("/db/cyberxml/data/cve/nist.gov/nvdcve-2.0-",$year,".xml")
let $input := doc($thisdoc)/nvd:nvd/nvd:entry[@id="'''+cvenum+'''"]
let $xsl := doc("/db/cyberxml/styles/xsl/cve.xsl")
return
transform:transform($input, $xsl, ())
'''
a = connExistDB()
idx =a.get_data(qrystr)
return render(request, 'cve_xml.html', {'idx':idx, 'cvenum':cvenum, 'qstr':qrystr})
'''
# redo this
def rawxml(request,cvenum):
cvenum = cvenum.upper()
try:
cveyear = cvenum.split('-')[1]
tpath=('//{http://scap.nist.gov/schema/feed/vulnerability/2.0}entry[contains(@id,"'+cvenum+'")]')
findall = etree.ETXPath(tpath)
if cveyear == '2015':
return HttpResponse(etree.tostring(findall(tree2015)[0]), content_type="application/xml")
elif cveyear == '2014':
return HttpResponse(etree.tostring(findall(tree2014)[0]), content_type="application/xml")
else:
raise Http404
except:
raise Http404
'''
#@login_required
def import_nist_cve(request):
if request.method == 'POST':
try:
files=imports.import_nist_cve()
return render(request, 'cve_import.html', {'files':files,})
except:
return render(request, 'cve_import.html', {'error_message': "request failed",})
else:
return render(request, 'cve_import.html')
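# URL wiring sketch (patterns are hypothetical; the /cve/nist/xml/ path matches
# the links emitted by the XQuery in cve_index above):
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^cve/(?P<cveyear>\d{4})/$', views.cve_index),
#       url(r'^cve/nist/xml/(?P<cvenum>CVE-\d{4}-\d+)$', views.cvexml),
#       url(r'^cve/import/$', views.import_nist_cve),
#   ]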
|
the-stack_106_24055 | import sys
from types import MappingProxyType, DynamicClassAttribute
__all__ = [
'EnumMeta',
'Enum', 'IntEnum', 'Flag', 'IntFlag',
'auto', 'unique',
]
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, proto):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
_auto_null = object()
class auto:
"""
Instances are replaced with an appropriate value in Enum class suites.
"""
value = _auto_null
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super().__init__()
self._member_names = []
self._last_values = []
self._ignore = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
"""
if _is_sunder(key):
if key not in (
'_order_', '_create_pseudo_member_',
'_generate_next_value_', '_missing_', '_ignore_',
):
raise ValueError('_names_ are reserved for future Enum use')
if key == '_generate_next_value_':
setattr(self, '_generate_next_value', value)
elif key == '_ignore_':
if isinstance(value, str):
value = value.replace(',',' ').split()
else:
value = list(value)
self._ignore = value
already = set(value) & set(self._member_names)
if already:
raise ValueError('_ignore_ cannot specify already set names: %r' % (already, ))
elif _is_dunder(key):
if key == '__order__':
key = '_order_'
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif key in self._ignore:
pass
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('%r already defined as: %r' % (key, self[key]))
if isinstance(value, auto):
if value.value == _auto_null:
value.value = self._generate_next_value(key, 1, len(self._member_names), self._last_values[:])
value = value.value
self._member_names.append(key)
self._last_values.append(value)
super().__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't exist.
# This is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
# create the namespace dict
enum_dict = _EnumDict()
# inherit previous flags and _generate_next_value_ function
member_type, first_enum = metacls._get_mixins_(bases)
if first_enum is not None:
enum_dict['_generate_next_value_'] = getattr(first_enum, '_generate_next_value_', None)
return enum_dict
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
#
# remove any keys listed in _ignore_
classdict.setdefault('_ignore_', []).append('_ignore_')
ignore = classdict['_ignore_']
for key in ignore:
classdict.pop(key, None)
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
enum_members = {k: classdict[k] for k in classdict._member_names}
for name in classdict._member_names:
del classdict[name]
# adjust the sunders
_order_ = classdict.pop('_order_', None)
# check for illegal enum names (any others?)
invalid_names = set(enum_members) & {'mro', }
if invalid_names:
raise ValueError('Invalid enum member name: {0}'.format(
','.join(invalid_names)))
# create a default docstring if one has not been provided
if '__doc__' not in classdict:
classdict['__doc__'] = 'An enumeration.'
# create our new Enum type
enum_class = super().__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in definition order
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# save attributes from super classes so we know if we can take
# the shortcut of storing members in the class dict
base_attributes = {a for b in enum_class.mro() for a in b.__dict__}
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
for member_name in classdict._member_names:
value = enum_members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
if member_type is object:
enum_member._value_ = value
else:
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member._value_ == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
# performance boost for any member that would not shadow
# a DynamicClassAttribute
if member_name not in base_attributes:
setattr(enum_class, member_name, enum_member)
# now add to _member_map_
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if obj_method is not None and obj_method is class_method:
setattr(enum_class, name, enum_method)
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
enum_class.__new_member__ = __new__
enum_class.__new__ = Enum.__new__
# py3 support for definition order (helps keep py2/py3 code in sync)
if _order_ is not None:
if isinstance(_order_, str):
_order_ = _order_.replace(',', ' ').split()
if _order_ != enum_class._member_names_:
raise TypeError('member order does not match _order_')
return enum_class
def __bool__(self):
"""
classes/types should always be True.
"""
return True
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='RED GREEN BLUE')).
When used for the functional API:
`value` will be the name of the new class.
`names` should be either a string of white-space/comma delimited names
(values will start at `start`), or an iterator/mapping of name, value pairs.
`module` should be set to the module this class is being created in;
if it is not set, an attempt to find that module will be made, but if
it fails the class will not be picklable.
`qualname` should be set to the actual location this class can be found
at in its module; by default it is set to the global scope. If this is
not correct, unpickling will fail in some circumstances.
`type`, if set, will be mixed in as the first base class.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member._name_ in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super().__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name) from None
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __len__(cls):
return len(cls._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a read-only view of the internal mapping.
"""
return MappingProxyType(cls._member_map_)
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __reversed__(cls):
return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super().__setattr__(name, value)
def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are incremented by 1 from `start`.
* An iterable of member names. Values are incremented by 1 from `start`.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value pairs.
"""
metacls = cls.__class__
bases = (cls, ) if type is None else (type, cls)
_, first_enum = cls._get_mixins_(bases)
classdict = metacls.__prepare__(class_name, bases)
# special processing needed for names?
if isinstance(names, str):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and names and isinstance(names[0], str):
original_names, names = names, []
last_values = []
for count, name in enumerate(original_names):
value = first_enum._generate_next_value_(name, start, count, last_values[:])
last_values.append(value)
names.append((name, value))
# Here, names is either an iterable of (name, value) or a mapping.
for item in names:
if isinstance(item, str):
member_name, member_value = item, names[item]
else:
member_name, member_value = item
classdict[member_name] = member_value
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
# TODO: replace the frame hack if a blessed way to know the calling
# module is ever developed
if module is None:
try:
module = sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError) as exc:
pass
if module is None:
_make_class_unpicklable(enum_class)
else:
enum_class.__module__ = module
if qualname is not None:
enum_class.__qualname__ = qualname
return enum_class
@staticmethod
def _get_mixins_(bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__
"""
if not bases:
return object, Enum
# double check that we are not subclassing a class with existing
# enumeration members; while we're at it, see if any other data
# type has been mixed in so we can use the correct __new__
member_type = first_enum = None
for base in bases:
if (base is not Enum and
issubclass(base, Enum) and
base._member_names_):
raise TypeError("Cannot extend enumerations")
# base is now the last base in bases
if not issubclass(base, Enum):
raise TypeError("new enumerations must be created as "
"`ClassName([mixin_type,] enum_type)`")
# get correct mix-in type (either mix-in type of Enum subclass, or
# first base if last base is Enum)
if not issubclass(bases[0], Enum):
member_type = bases[0] # first data type
first_enum = bases[-1] # enum type
else:
for base in bases[0].__mro__:
# most common: (IntEnum, int, Enum, object)
# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
# <class 'int'>, <Enum 'Enum'>,
# <class 'object'>)
if issubclass(base, Enum):
if first_enum is None:
first_enum = base
else:
if member_type is None:
member_type = base
return member_type, first_enum
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
# now find the correct __new__, checking to see of one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __new_member__
__new__ = classdict.get('__new__', None)
# should __new__ be saved as __new_member__ later?
save_new = __new__ is not None
if __new__ is None:
# check all possibles for __new_member__ before falling back to
# __new__
for method in ('__new_member__', '__new__'):
for possible in (member_type, first_enum):
target = getattr(possible, method, None)
if target not in {
None,
None.__new__,
object.__new__,
Enum.__new__,
}:
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, save_new, use_args
class Enum(metaclass=EnumMeta):
"""Generic enumeration.
Derive from this class to define new enumerations.
"""
def __new__(cls, value):
# all enum instances are actually created during class construction
# without calling this method; this method is called by the metaclass'
# __call__ (i.e. Color(3) ), and by pickle
if type(value) is cls:
# For lookups like Color(Color.RED)
return value
# by-value search for a matching enum member
# see if it's in the reverse mapping (for hashable values)
try:
if value in cls._value2member_map_:
return cls._value2member_map_[value]
except TypeError:
# not there, now do long search -- O(n) behavior
for member in cls._member_map_.values():
if member._value_ == value:
return member
# still not found -- try _missing_ hook
return cls._missing_(value)
def _generate_next_value_(name, start, count, last_values):
for last_value in reversed(last_values):
try:
return last_value + 1
except TypeError:
pass
else:
return start
@classmethod
def _missing_(cls, value):
raise ValueError("%r is not a valid %s" % (value, cls.__name__))
def __repr__(self):
return "<%s.%s: %r>" % (
self.__class__.__name__, self._name_, self._value_)
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
def __dir__(self):
added_behavior = [
m
for cls in self.__class__.mro()
for m in cls.__dict__
if m[0] != '_' and m not in self._member_map_
]
return (['__class__', '__doc__', '__module__'] + added_behavior)
def __format__(self, format_spec):
# mixed-in Enums should use the mixed-in type's __format__, otherwise
# we can get strange results with the Enum name showing up instead of
# the value
# pure Enum branch
if self._member_type_ is object:
cls = str
val = str(self)
# mix-in branch
else:
cls = self._member_type_
val = self._value_
return cls.__format__(val, format_spec)
def __hash__(self):
return hash(self._name_)
def __reduce_ex__(self, proto):
return self.__class__, (self._value_, )
# DynamicClassAttribute is used to provide access to the `name` and
# `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`. This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@DynamicClassAttribute
def name(self):
"""The name of the Enum member."""
return self._name_
@DynamicClassAttribute
def value(self):
"""The value of the Enum member."""
return self._value_
@classmethod
def _convert(cls, name, module, filter, source=None):
"""
Create a new Enum subclass that replaces a collection of global constants
"""
# convert all constants from source (or module) that pass filter() to
# a new Enum called name, and export the enum and its members back to
# module;
# also, replace the __reduce_ex__ method so unpickling works in
# previous Python versions
module_globals = vars(sys.modules[module])
if source:
source = vars(source)
else:
source = module_globals
# _value2member_map_ is populated in the same order every time
# for a consistent reverse mapping of number to name when there
# are multiple names for the same number.
members = [
(name, value)
for name, value in source.items()
if filter(name)]
try:
# sort by value
members.sort(key=lambda t: (t[1], t[0]))
except TypeError:
# unless some values aren't comparable, in which case sort by name
members.sort(key=lambda t: t[0])
cls = cls(name, members, module=module)
cls.__reduce_ex__ = _reduce_ex_by_name
module_globals.update(cls.__members__)
module_globals[name] = cls
return cls
class IntEnum(int, Enum):
"""Enum where members are also (and must be) ints"""
def _reduce_ex_by_name(self, proto):
return self.name
class Flag(Enum):
"""Support for flags"""
def _generate_next_value_(name, start, count, last_values):
"""
Generate the next value when not given.
name: the name of the member
start: the initital start value or None
count: the number of existing members
last_value: the last value assigned or None
"""
if not count:
return start if start is not None else 1
for last_value in reversed(last_values):
try:
high_bit = _high_bit(last_value)
break
except Exception:
raise TypeError('Invalid Flag value: %r' % last_value) from None
return 2 ** (high_bit+1)
@classmethod
def _missing_(cls, value):
original_value = value
if value < 0:
value = ~value
possible_member = cls._create_pseudo_member_(value)
if original_value < 0:
possible_member = ~possible_member
return possible_member
@classmethod
def _create_pseudo_member_(cls, value):
"""
Create a composite member iff value contains only members.
"""
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
# verify all bits are accounted for
_, extra_flags = _decompose(cls, value)
if extra_flags:
raise ValueError("%r is not a valid %s" % (value, cls.__name__))
# construct a singleton enum pseudo-member
pseudo_member = object.__new__(cls)
pseudo_member._name_ = None
pseudo_member._value_ = value
# use setdefault in case another thread already created a composite
# with this value
pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)
return pseudo_member
def __contains__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return other._value_ & self._value_ == other._value_
def __repr__(self):
cls = self.__class__
if self._name_ is not None:
return '<%s.%s: %r>' % (cls.__name__, self._name_, self._value_)
members, uncovered = _decompose(cls, self._value_)
return '<%s.%s: %r>' % (
cls.__name__,
'|'.join([str(m._name_ or m._value_) for m in members]),
self._value_,
)
def __str__(self):
cls = self.__class__
if self._name_ is not None:
return '%s.%s' % (cls.__name__, self._name_)
members, uncovered = _decompose(cls, self._value_)
if len(members) == 1 and members[0]._name_ is None:
return '%s.%r' % (cls.__name__, members[0]._value_)
else:
return '%s.%s' % (
cls.__name__,
'|'.join([str(m._name_ or m._value_) for m in members]),
)
def __bool__(self):
return bool(self._value_)
def __or__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__class__(self._value_ | other._value_)
def __and__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__class__(self._value_ & other._value_)
def __xor__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__class__(self._value_ ^ other._value_)
def __invert__(self):
members, uncovered = _decompose(self.__class__, self._value_)
inverted = self.__class__(0)
for m in self.__class__:
if m not in members and not (m._value_ & self._value_):
inverted = inverted | m
return self.__class__(inverted)
class IntFlag(int, Flag):
"""Support for integer-based Flags"""
@classmethod
def _missing_(cls, value):
if not isinstance(value, int):
raise ValueError("%r is not a valid %s" % (value, cls.__name__))
new_member = cls._create_pseudo_member_(value)
return new_member
@classmethod
def _create_pseudo_member_(cls, value):
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
need_to_create = [value]
# get unaccounted for bits
_, extra_flags = _decompose(cls, value)
# timer = 10
while extra_flags:
# timer -= 1
bit = _high_bit(extra_flags)
flag_value = 2 ** bit
if (flag_value not in cls._value2member_map_ and
flag_value not in need_to_create
):
need_to_create.append(flag_value)
if extra_flags == -flag_value:
extra_flags = 0
else:
extra_flags ^= flag_value
for value in reversed(need_to_create):
# construct singleton pseudo-members
pseudo_member = int.__new__(cls, value)
pseudo_member._name_ = None
pseudo_member._value_ = value
# use setdefault in case another thread already created a composite
# with this value
pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)
return pseudo_member
def __or__(self, other):
if not isinstance(other, (self.__class__, int)):
return NotImplemented
result = self.__class__(self._value_ | self.__class__(other)._value_)
return result
def __and__(self, other):
if not isinstance(other, (self.__class__, int)):
return NotImplemented
return self.__class__(self._value_ & self.__class__(other)._value_)
def __xor__(self, other):
if not isinstance(other, (self.__class__, int)):
return NotImplemented
return self.__class__(self._value_ ^ self.__class__(other)._value_)
__ror__ = __or__
__rand__ = __and__
__rxor__ = __xor__
def __invert__(self):
result = self.__class__(~self._value_)
return result
def _high_bit(value):
"""returns index of highest bit, or -1 if value is zero or negative"""
return value.bit_length() - 1
def unique(enumeration):
"""Class decorator for enumerations ensuring unique member values."""
duplicates = []
for name, member in enumeration.__members__.items():
if name != member.name:
duplicates.append((name, member.name))
if duplicates:
alias_details = ', '.join(
["%s -> %s" % (alias, name) for (alias, name) in duplicates])
raise ValueError('duplicate values found in %r: %s' %
(enumeration, alias_details))
return enumeration
def _decompose(flag, value):
"""Extract all members from the value."""
# _decompose is only called if the value is not named
not_covered = value
negative = value < 0
# issue29167: wrap accesses to _value2member_map_ in a list to avoid race
# conditions between iterating over it and having more pseudo-
# members added to it
if negative:
# only check for named flags
flags_to_check = [
(m, v)
for v, m in list(flag._value2member_map_.items())
if m.name is not None
]
else:
# check for named flags and powers-of-two flags
flags_to_check = [
(m, v)
for v, m in list(flag._value2member_map_.items())
if m.name is not None or _power_of_two(v)
]
members = []
for member, member_value in flags_to_check:
if member_value and member_value & value == member_value:
members.append(member)
not_covered &= ~member_value
if not members and value in flag._value2member_map_:
members.append(flag._value2member_map_[value])
members.sort(key=lambda m: m._value_, reverse=True)
if len(members) > 1 and members[0].value == value:
# we have the breakdown, don't need the value member itself
members.pop(0)
return members, not_covered
def _power_of_two(value):
if value < 1:
return False
return value == 2 ** _high_bit(value)
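# Illustrative use of the Flag/IntFlag machinery defined above (the Perm class
# is an example only):
#
#   class Perm(IntFlag):
#       R = 4
#       W = 2
#       X = 1
#
#   Perm.R | Perm.W               -> <Perm.R|W: 6>
#   Perm(7)                       -> <Perm.R|W|X: 7>   (composite pseudo-member)
#   Perm.W in (Perm.R | Perm.W)   -> True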
|
the-stack_106_24056 | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import datetime
from packaging.version import parse
rootdir = os.path.join(
os.getenv("SPHINX_MULTIVERSION_SOURCEDIR", default=os.getcwd()), ".."
)
sys.path.insert(0, rootdir)
print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
[print(p) for p in sys.path]
print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
import fairlearn # noqa: E402
print(fairlearn.__version__)
print("================================")
# -- Project information -----------------------------------------------------
project = "Fairlearn"
copyright = f"2018 - {datetime.now().year}, Fairlearn contributors"
author = "Fairlearn contributors"
# The full version, including alpha/beta/rc tags
release = fairlearn.__version__
def check_if_v046():
"""Check to see if current version being built is v0.4.6."""
result = False
if fairlearn.__version__ == "0.4.6":
print("Detected 0.4.6 in fairlearn.__version__")
result = True
smv_name = os.getenv("SPHINX_MULTIVERSION_NAME")
if smv_name is not None:
print("Found SPHINX_MULTIVERSION_NAME: ", smv_name)
result = smv_name == "v0.4.6"
else:
print("SPHINX_MULTIVERSION_NAME not in environment")
return result
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"bokeh.sphinxext.bokeh_plot",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx_gallery.gen_gallery",
"sphinx_multiversion",
"sphinx_autodoc_typehints", # needs to be AFTER napoleon
"numpydoc",
]
source_suffix = [".rst"]
intersphinx_mapping = {
"python3": ("https://docs.python.org/3", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"sklearn": ("https://scikit-learn.org/stable/", None),
"matplotlib": (
"https://matplotlib.org/",
None,
),
"tensorflow": (
"https://www.tensorflow.org/api_docs/python",
"https://raw.githubusercontent.com/GPflow/"
"tensorflow-intersphinx/master/tf2_py_objects.inv",
),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.rst"]
master_doc = "index"
# Multiversion settings
# Show only the highest patch versions of each minor version.
# Example: include 0.4.6, but not 0.4.0 to 0.4.5
smv_tag_whitelist = r"^v0\.4\.6|^v0\.5\.0|^v0\.6\.2|^v0\.7\.0+$"
smv_branch_whitelist = r"^main$"
if check_if_v046():
print("Current version is v0.4.6, will apply overrides")
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"logo_link": "https://fairlearn.org",
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/fairlearn/fairlearn",
"icon": "fab fa-github",
},
{
"name": "Twitter",
"url": "https://twitter.com/fairlearn",
"icon": "fab fa-twitter",
},
{
"name": "StackOverflow",
"url": "https://stackoverflow.com/questions/tagged/fairlearn",
"icon": "fab fa-stack-overflow",
},
{
"name": "Discord",
"url": "https://discord.gg/R22yCfgsRn",
"icon": "fab fa-discord",
},
],
"show_prev_next": False,
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/images/fairlearn_full_color.svg"
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no index is generated.
html_use_index = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Use filename_pattern so that plot_adult_dataset is not
# included in the gallery, but its plot is available for
# the quickstart
sphinx_gallery_conf = {
"examples_dirs": "../examples",
"gallery_dirs": "auto_examples",
# pypandoc enables rst to md conversion in downloadable notebooks
"pypandoc": True,
}
html_sidebars = {
"**": ["version-sidebar.html", "search-field.html", "sidebar-nav-bs.html"],
}
# Auto-Doc Options
# ----------------
# Change the ordering of the member documentation
autodoc_member_order = "groupwise"
# Linking Code
# ------------
# The following is used by sphinx.ext.linkcode to provide links to github
# based on pandas doc/source/conf.py
def linkcode_resolve(domain, info):
"""Determine the URL corresponding to Python object."""
if domain != "py":
return None
modname = info["module"]
fullname = info["fullname"]
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split("."):
try:
obj = getattr(obj, part)
except AttributeError:
return None
try:
fn = inspect.getsourcefile(inspect.unwrap(obj))
except TypeError:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
lineno = None
if lineno:
linespec = f"#L{lineno}-L{lineno + len(source) - 1}"
else:
linespec = ""
tag_or_branch = os.getenv("SPHINX_MULTIVERSION_NAME", default="main")
fn = os.path.relpath(fn, start=os.path.dirname(fairlearn.__file__)).replace(
os.sep, "/"
)
return f"http://github.com/fairlearn/fairlearn/blob/{tag_or_branch}/fairlearn/{fn}{linespec}"
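# For illustration only (an assumed example, not taken from the Fairlearn docs):
# resolving a public object such as fairlearn.metrics.MetricFrame yields a URL
# of roughly the form
#   http://github.com/fairlearn/fairlearn/blob/main/fairlearn/metrics/<module file>.py#L<start>-L<end>
# with "main" replaced by the tag or branch from SPHINX_MULTIVERSION_NAME.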
# -- LaTeX macros ------------------------------------------------------------
mathjax3_config = {
"tex": {
"macros": {
"E": "{\\mathbb{E}}",
"P": "{\\mathbb{P}}",
"given": "\\mathbin{\\vert}",
}
}
}
def check_if_v07():
"""Check to see if current version being built is > v0.7."""
result = False
if parse(fairlearn.__version__) > parse("0.7"):
print("Detected version > 0.7 in fairlearn.__version__")
result = True
return result
# Setup for sphinx-bibtex
# Only use sphinx-bibtex if version is above 0.7
if check_if_v07():
extensions += [
"sphinxcontrib.bibtex",
]
bibtex_bibfiles = ["refs.bib"]
|
the-stack_106_24057 | """
This requires the MLCube 2.0 that's located somewhere in one of dev branches.
"""
import os
import click
import logging
import typing as t
from omegaconf import (OmegaConf, DictConfig)
logger = logging.getLogger(__name__)
class MLCubeConfig(object):
@staticmethod
def ensure_values_exist(config: DictConfig, keys: t.Union[t.Text, t.List], constructor: t.Callable) -> t.List:
if isinstance(keys, str):
keys = [keys]
for key in keys:
if config.get(key, None) is None:
config[key] = constructor()
return [config[key] for key in keys]
@staticmethod
def get_uri(value: t.Text) -> t.Text:
if value.startswith('storage:'):
raise ValueError(f"Storage schema is not yet supported")
return os.path.abspath(os.path.expanduser(value))
@staticmethod
def create_runtime_config(root: t.Text, workspace: t.Optional[t.Text] = None) -> DictConfig:
""" Return base configuration for all MLCubes.
Args:
root: Path to MLCube root directory.
workspace: Workspace path to use in this MLCube run.
Returns:
Base configuration.
"""
runtime_config = OmegaConf.create({
# This configuration contains single entry - `runtime`. It is assumed that users do not use `runtime` key.
'runtime': {
# MLCube root folder
'root': root,
# Path to a default workspace which is located inside MLCube root directory. We need this to copy
# configuration files to user-provided workspaces.
'default_workspace': '${runtime.root}/workspace',
# Default workspace path
'workspace': '${runtime.root}/workspace' if workspace is None else MLCubeConfig.get_uri(workspace),
# Default path to a global (user) config.
'global_config': {
'uri': '${oc.env:MLCUBE_GLOBAL_CONFIG, ${oc.env:HOME}/.mlcube.yaml}',
'ignore': False
}
}
})
return runtime_config
@staticmethod
def create_mlcube_config(mlcube_config_file: t.Text, mlcube_cli_args: DictConfig, task_cli_args: t.Dict,
platform: t.Optional[t.Text], workspace: t.Optional[t.Text] = None,
resolve: bool = True) -> DictConfig:
""" Create MLCube config merging different configs - base, global, local and cli.
Args:
mlcube_config_file: Path to mlcube.yaml file.
mlcube_cli_args: MLCube config from command line.
task_cli_args: Task parameters from command line.
platform: Runner name.
workspace: Workspace path to use in this MLCube run.
resolve: If true, compute all values (some of them may reference other parameters or environmental
variables).
"""
# TODO: sergey - it's not really clear now why I use list here.
platforms = [platform] if platform else []
# Merge default runtime config, local mlcube config and mlcube config from CLI.
mlcube_config = OmegaConf.merge(
MLCubeConfig.create_runtime_config(os.path.dirname(mlcube_config_file), workspace),
OmegaConf.load(mlcube_config_file),
mlcube_cli_args
)
# If available, load global MLCube config. We really need only the right platform section from global config.
if not mlcube_config.runtime.global_config.ignore:
uri = mlcube_config.runtime.global_config.uri
try:
global_config = OmegaConf.load(uri)
if len(platforms) != 0:
global_config = OmegaConf.create({
platform: global_config.get(platform, {}) for platform in platforms
})
mlcube_config = OmegaConf.merge(global_config, mlcube_config)
except (IsADirectoryError, FileNotFoundError):
logger.warning("Global configuration (%s) not loaded.", uri)
for task_name in mlcube_config.tasks.keys():
[task] = MLCubeConfig.ensure_values_exist(mlcube_config.tasks, task_name, dict)
[parameters] = MLCubeConfig.ensure_values_exist(task, 'parameters', dict)
[inputs, outputs] = MLCubeConfig.ensure_values_exist(parameters, ['inputs', 'outputs'], dict)
MLCubeConfig.check_parameters(inputs, 'input', task_cli_args)
MLCubeConfig.check_parameters(outputs, 'output', task_cli_args)
if resolve:
OmegaConf.resolve(mlcube_config)
return mlcube_config
@staticmethod
def check_parameters(parameters: DictConfig, io: t.Text, task_cli_args: t.Dict) -> None:
""" Check that task parameters are defined according to MLCube schema.
Args:
parameters: Task parameters (`inputs` or `outputs`).
io: `input` or `output`.
task_cli_args: Task parameters from command line.
This function does not set `type` of parameters (if not present) in all cases.
"""
for name in parameters.keys():
            # The parameter entry is guaranteed to exist here; make sure its value is not None.
[param_def] = MLCubeConfig.ensure_values_exist(parameters, name, dict)
# Deal with the case when value is a string (default value).
if isinstance(param_def, str):
parameters[name] = {'default': param_def}
param_def = parameters[name]
# If `default` key is not present, use parameter name as value.
_ = MLCubeConfig.ensure_values_exist(param_def, 'default', lambda: name)
# Finally, see if there is value on a command line
param_def.default = task_cli_args.get(name, param_def.default)
# It's here probably temporarily. Does not make too much sense to check for input types, since inputs
# might not be in the workspace yet (both independent and dependent).
_ = MLCubeConfig.ensure_values_exist(param_def, 'type', lambda: 'unknown')
if io == 'output' and param_def.type == 'unknown' and param_def.default.endswith(os.sep):
param_def.type = 'directory'
# Resolve path if it's relative (meaning it's relative to workspace directory.)
# This should be done in a runner (for instance, this MLCube can run someplace else on a remote host).
# _param_def.default = os.path.abspath(os.path.join(mlcube_config.runtime.workspace,
# _param_def.default))
class CliParser(object):
@staticmethod
def parse_mlcube_arg(mlcube: t.Optional[t.Text]) -> t.Tuple[t.Text, t.Text]:
""" Parse value of the `--mlcube` command line argument.
Args:
mlcube: Path to a MLCube directory or `mlcube.yaml` file. If it's a directory, standard name
`mlcube.yaml` is assumed for MLCube definition file.
Returns:
Tuple (mlcube_root_directory, mlcube_file_name), `mlcube_file_name` is a file name inside
`mlcube_root_directory` directory.
"""
if mlcube is None:
mlcube = os.getcwd()
mlcube_root, mlcube_file = os.path.abspath(mlcube), 'mlcube.yaml'
if os.path.isfile(mlcube_root):
mlcube_root, mlcube_file = os.path.split(mlcube_root)
return mlcube_root, mlcube_file
@staticmethod
def parse_list_arg(arg: t.Optional[t.Text], default: t.Optional[t.Text] = None) -> t.List[t.Text]:
""" Parse a string into list of strings using `,` as a separator.
Args:
arg: String if elements separated with `,`.
default: Default value for `arg` if `arg` is None or empty.
Returns:
List of items.
"""
arg = arg or default
if not arg:
return []
return arg.split(',')
@staticmethod
def parse_extra_arg(*args: t.Text) -> t.Tuple[DictConfig, t.Dict]:
""" Parse extra arguments on a command line.
These arguments correspond to:
- MLCube runtime arguments. These start with `-P` prefix and are translated to a nested dictionaries
structure using `.` as a separator. For instance, `-Pdocker.image=mlcommons/mnist:0.0.1` translates to
python dictionary {'docker': {'image': 'mlcommons/mnist:0.0.1'}}.
        - Task arguments are all other arguments that do not start with `-P`. These arguments are input/output
arguments of tasks.
Args:
args: List of arguments that have not been parsed before.
Returns:
Tuple of two dictionaries: (mlcube_arguments, task_arguments).
"""
        # '-P'-prefixed arguments are MLCube config overrides (dot-list syntax); all other
        # arguments are task input/output parameters, matching the docstring above.
        mlcube_args = OmegaConf.from_dotlist([arg[2:] for arg in args if arg.startswith('-P')])
        task_args = [arg.split('=') for arg in args if not arg.startswith('-P')]
task_args = {arg[0]: arg[1] for arg in task_args}
return mlcube_args, task_args
class Platforms(object):
@staticmethod
def get_runner(platform: t.Text) -> t.Callable:
"""Return runner class or create function for given platform.
Args:
platform: Platform name (e.g. `docker`, `podman`, `ssh`, `gcp`, `k8s` etc.).
Returns:
Callable object (e.g. runner class) that can create runner instance.
"""
platform = platform.lower()
if platform in ('docker', 'podman'):
try:
from mlcube_docker.docker_run import DockerRun as Runner
except ImportError:
print(f"Docker/Podman runner (platform={platform}) could not be imported.")
raise
else:
raise ValueError(f"Runner for platform '{platform}' is not supported yet.")
return Runner
mlcube_option = click.option(
'--mlcube', required=False, type=str, default=os.getcwd(),
help="Path to MLCube. This can be either a directory path that becomes MLCube's root directory, or path to MLCube"
"definition file (.yaml). In the latter case the MLCube's root directory becomes parent directory of the yaml"
"file. Default is current directory."
)
platform_option = click.option(
'--platform', required=False, type=str, default='docker',
help="Platform to run MLCube, default is 'docker' (that also supports podman)."
)
task_option = click.option(
'--task', required=False, type=str, default='main',
help="MLCube task name(s) to run, default is `main`. This parameter can take a list value, in which case task names"
"are separated with ','."
)
workspace_option = click.option(
'--workspace', required=False, type=str, default=None,
help="Workspace location that is used to store input/output artifacts of MLCube tasks."
)
@click.group(name='mlcube')
def cli():
pass
@cli.command(name='show_config', help='Show MLCube configuration.',
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True))
@mlcube_option
@platform_option
@workspace_option
@click.option('--resolve', is_flag=True, help="Resolve MLCube parameters.")
@click.pass_context
def show_config(ctx: click.core.Context, mlcube: t.Text, platform: t.Text, workspace: t.Text, resolve: bool) -> None:
"""
Args:
ctx: Click context. We need this to get access to extra CLI arguments.
mlcube: Path to MLCube root directory or mlcube.yaml file.
platform: Platform to use to run this MLCube (docker, singularity, gcp, k8s etc).
workspace: Workspace path to use. If not specified, default workspace inside MLCube directory is used.
resolve: if True, compute values in MLCube configuration.
"""
mlcube_root, mlcube_file = CliParser.parse_mlcube_arg(mlcube)
mlcube_cli_args, task_cli_args = CliParser.parse_extra_arg(*ctx.args)
mlcube_config = MLCubeConfig.create_mlcube_config(
os.path.join(mlcube_root, mlcube_file), mlcube_cli_args, task_cli_args, platform, workspace, resolve=resolve
)
print(OmegaConf.to_yaml(mlcube_config))
@cli.command(name='run', help='Run MLCube ML task.',
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True))
@mlcube_option
@platform_option
@task_option
@workspace_option
@click.pass_context
def run(ctx: click.core.Context, mlcube: t.Text, platform: t.Text, task: t.Text, workspace: t.Text) -> None:
"""
Args:
ctx: Click context. We need this to get access to extra CLI arguments.
mlcube: Path to MLCube root directory or mlcube.yaml file.
platform: Platform to use to run this MLCube (docker, singularity, gcp, k8s etc).
task: Comma separated list of tasks to run.
workspace: Workspace path to use. If not specified, default workspace inside MLCube directory is used.
"""
mlcube_root, mlcube_file = CliParser.parse_mlcube_arg(mlcube)
mlcube_cli_args, task_cli_args = CliParser.parse_extra_arg(*ctx.args)
mlcube_config = MLCubeConfig.create_mlcube_config(
os.path.join(mlcube_root, mlcube_file), mlcube_cli_args, task_cli_args, platform, workspace, resolve=True
)
runner_cls: t.Callable = Platforms.get_runner(platform)
tasks: t.List[str] = CliParser.parse_list_arg(task, default='main')
for task in tasks:
docker_runner = runner_cls(mlcube_config, task=task)
docker_runner.run()
if __name__ == "__main__":
cli()
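# Illustrative invocation (paths and image name are assumptions, not part of
# this module): task parameters are plain key=value pairs, while -P-prefixed
# arguments override the MLCube/platform configuration as a dot-list, e.g.
#   python this_script.py run --mlcube ./mnist --task train \
#       data_dir=workspace/data -Pdocker.image=mlcommons/mnist:0.0.1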
|
the-stack_106_24058 | """This module contains the general information for FabricSanCloudFsmStage ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FabricSanCloudFsmStageConsts:
LAST_UPDATE_TIME_ = ""
NAME_SWITCH_MODE_BEGIN = "SwitchModeBegin"
NAME_SWITCH_MODE_FAIL = "SwitchModeFail"
NAME_SWITCH_MODE_SUCCESS = "SwitchModeSuccess"
NAME_SWITCH_MODE_SW_CONFIG_LOCAL = "SwitchModeSwConfigLocal"
NAME_SWITCH_MODE_SW_CONFIG_PEER = "SwitchModeSwConfigPeer"
NAME_SWITCH_MODE_WAIT_FOR_USER_ACK = "SwitchModeWaitForUserAck"
NAME_NOP = "nop"
STAGE_STATUS_FAIL = "fail"
STAGE_STATUS_IN_PROGRESS = "inProgress"
STAGE_STATUS_NOP = "nop"
STAGE_STATUS_PENDING = "pending"
STAGE_STATUS_SKIP = "skip"
STAGE_STATUS_SUCCESS = "success"
STAGE_STATUS_THROTTLED = "throttled"
class FabricSanCloudFsmStage(ManagedObject):
"""This is FabricSanCloudFsmStage class."""
consts = FabricSanCloudFsmStageConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("FabricSanCloudFsmStage", "fabricSanCloudFsmStage", "stage-[name]", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], [u'fabricSanCloudFsm'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"last_update_time": MoPropertyMeta("last_update_time", "lastUpdateTime", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, None, None, None, None, ["SwitchModeBegin", "SwitchModeFail", "SwitchModeSuccess", "SwitchModeSwConfigLocal", "SwitchModeSwConfigPeer", "SwitchModeWaitForUserAck", "nop"], []),
"order": MoPropertyMeta("order", "order", "ushort", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"retry": MoPropertyMeta("retry", "retry", "byte", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"stage_status": MoPropertyMeta("stage_status", "stageStatus", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"lastUpdateTime": "last_update_time",
"name": "name",
"order": "order",
"retry": "retry",
"rn": "rn",
"sacl": "sacl",
"stageStatus": "stage_status",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.child_action = None
self.descr = None
self.last_update_time = None
self.order = None
self.retry = None
self.sacl = None
self.stage_status = None
self.status = None
ManagedObject.__init__(self, "FabricSanCloudFsmStage", parent_mo_or_dn, **kwargs)
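# Illustrative usage sketch (assumes an already-logged-in UcsHandle named
# `handle`; not taken from the SDK docs): FSM stages are read-only objects that
# are normally retrieved from a live UCS domain rather than constructed by hand.
#   stages = handle.query_classid("FabricSanCloudFsmStage")
#   for st in stages:
#       print(st.name, st.stage_status, st.last_update_time)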
|
the-stack_106_24059 | import os
from PIL import Image
'''
This file is used for image transformations: horizontal/vertical scaling and cropping.
Currently only 1 of the 3 modes ('crop') is used in the app.
Not sure whether it's reasonable to delete the other two modes. May need them later.
'''
SCALE_WIDTH = 'w'
SCALE_HEIGHT = 'h'
SCALE_BOTH = 'crop'
def crop(img, x, y):
img_x, img_y = img.size
img_ratio = img_x / float(img_y)
crop_ratio = x / float(y)
if crop_ratio == img_ratio:
img.thumbnail([x, y], Image.ANTIALIAS)
return img
if crop_ratio < img_ratio:
scale_factor = img_y / float(y)
img.thumbnail([int(img_x / scale_factor) + 1, y], Image.ANTIALIAS)
img_x, img_y = img.size
        x_offset = (img_x - x) // 2  # integer offset for the crop box
return img.crop([x_offset, 0, x_offset + x, y])
if crop_ratio > img_ratio:
scale_factor = img_x / float(x)
img.thumbnail([x, int(img_y / scale_factor) + 1], Image.ANTIALIAS)
img_x, img_y = img.size
        y_offset = (img_y - y) // 2  # integer offset for the crop box
return img.crop([0, y_offset, x, y_offset + y])
def scale(max_x, pair):
x, y = pair
new_y = (float(max_x) / x) * y
return (int(max_x), int(new_y))
def thumbnail(filename: str, size='200w'):
    ''' Returns a scaled/cropped image.
    filename - path to the image file
    size - string. Has 3 options:
            resize to a target HEIGHT, for example: '50h' (50 pixels high)
            resize to a target WIDTH, for example: '50w'
            resize to a target WIDTH & HEIGHT, for example: '50x150'
    If cropping is needed to fit W&H, the image is center-cropped (cut off
    equally from both sides).
    Example (target WIDTH & HEIGHT case):
pic = thumbnail(filepath, str(image_width)+'x'+str(image_height))
'''
# defining the size
if (size.lower().endswith('h')):
mode = 'h'
size = size[:-1]
max_size = int(size.strip())
elif (size.lower().endswith('w')):
mode = 'w'
size = size[:-1]
max_size = int(size.strip())
else:
mode = 'crop'
    # open the image and resize/crop it according to the requested mode
image = Image.open(filename)
image_x, image_y = image.size
if mode == SCALE_HEIGHT:
image_y, image_x = scale(max_size, (image_y, image_x))
image.thumbnail([image_x, image_y], Image.ANTIALIAS)
elif mode == SCALE_WIDTH:
image_x, image_y = scale(max_size, (image_x, image_y))
image.thumbnail([image_x, image_y], Image.ANTIALIAS)
elif mode == SCALE_BOTH:
x, y = [int(i) for i in size.split('x')]
image = crop(image, x, y)
else:
raise Exception("Thumbnail size must be in ##w, ##h, or ##x## format.")
return image
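# Minimal usage sketch ('photo.jpg' is a hypothetical file): the three size
# modes accepted by thumbnail() above.
#   pic = thumbnail('photo.jpg', '200w')     # scale to 200 px wide
#   pic = thumbnail('photo.jpg', '120h')     # scale to 120 px high
#   pic = thumbnail('photo.jpg', '200x120')  # scale and center-crop to 200x120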
|
the-stack_106_24061 | class Solution:
    def solve(self, points):
        # Minimum cost to connect all points: Kruskal's MST over all pairwise
        # edges, weighted by Manhattan distance, with a union-find structure.
        edges = [[abs(points[i][0] - points[j][0]) + abs(points[i][1] - points[j][1]), i, j]
                 for i in range(len(points)) for j in range(i + 1, len(points))]
        edges.sort()
        parents = list(range(len(points)))
        ans = 0
        def union(x, y, parents):
            # Merge the components containing x and y.
            x, y = find(x, parents), find(y, parents)
            parents[x] = y
        def find(x, parents):
            # Representative of x's component, with path compression.
            if x == parents[x]:
                return x
            parents[x] = find(parents[x], parents)
            return parents[x]
        for cost, i, j in edges:
            # Take the cheapest edge that joins two different components.
            if find(i, parents) != find(j, parents):
                union(i, j, parents)
                ans += cost
        return ans
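# Minimal usage sketch (points and expected cost are an illustrative example,
# worked out by hand with Manhattan distances): the minimum spanning tree of
# these five points costs 3 + 4 + 4 + 9 = 20.
if __name__ == "__main__":
    pts = [[0, 0], [2, 2], [3, 10], [5, 2], [7, 0]]
    print(Solution().solve(pts))  # expected output: 20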
|
the-stack_106_24063 | import logging
logging.basicConfig(level=logging.DEBUG)
import os
from slack_bolt.app import App
from slack_bolt.context import BoltContext
bot_token = os.environ.get("SLACK_SDK_TEST_SOCKET_MODE_BOT_TOKEN")
app = App(signing_secret="will-be-removed-soon", token=bot_token)
@app.event("app_mention")
def mention(context: BoltContext):
context.say(":wave: Hi there!")
@app.event("message")
def message(context: BoltContext, event: dict):
context.client.reactions_add(
channel=event["channel"],
timestamp=event["ts"],
name="eyes",
)
@app.command("/hello-socket-mode")
def hello_command(ack, body):
user_id = body["user_id"]
ack(f"Hi <@{user_id}>!")
async def main():
from bolt_adapter.websockets import SocketModeHandler
app_token = os.environ.get("SLACK_SDK_TEST_SOCKET_MODE_APP_TOKEN")
await SocketModeHandler(app, app_token).start_async()
if __name__ == "__main__":
import asyncio
asyncio.run(main())
# export SLACK_SDK_TEST_SOCKET_MODE_APP_TOKEN=
# export SLACK_SDK_TEST_SOCKET_MODE_BOT_TOKEN=
# pip install .[optional]
# pip install slack_bolt
# python integration_tests/samples/socket_mode/{this file name}.py
|
the-stack_106_24064 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
import numpy as np # type: ignore
from onnx import helper, defs, numpy_helper, checker
from onnx import AttributeProto, TensorProto, GraphProto, DenotationConstProto
from typing import Text, Any, List
import unittest
class TestHelperAttributeFunctions(unittest.TestCase):
def test_attr_float(self): # type: () -> None
# float
attr = helper.make_attribute("float", 1.)
self.assertEqual(attr.name, "float")
self.assertEqual(attr.f, 1.)
checker.check_attribute(attr)
# float with scientific
attr = helper.make_attribute("float", 1e10)
self.assertEqual(attr.name, "float")
self.assertEqual(attr.f, 1e10)
checker.check_attribute(attr)
def test_attr_int(self): # type: () -> None
# integer
attr = helper.make_attribute("int", 3)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 3)
checker.check_attribute(attr)
# long integer
attr = helper.make_attribute("int", 5)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 5)
checker.check_attribute(attr)
# octinteger
attr = helper.make_attribute("int", 0o1701)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 0o1701)
checker.check_attribute(attr)
# hexinteger
attr = helper.make_attribute("int", 0x1701)
self.assertEqual(attr.name, "int")
self.assertEqual(attr.i, 0x1701)
checker.check_attribute(attr)
def test_attr_doc_string(self): # type: () -> None
attr = helper.make_attribute("a", "value")
self.assertEqual(attr.name, "a")
self.assertEqual(attr.doc_string, "")
attr = helper.make_attribute("a", "value", "doc")
self.assertEqual(attr.name, "a")
self.assertEqual(attr.doc_string, "doc")
def test_attr_string(self): # type: () -> None
# bytes
attr = helper.make_attribute("str", b"test")
self.assertEqual(attr.name, "str")
self.assertEqual(attr.s, b"test")
checker.check_attribute(attr)
# unspecified
attr = helper.make_attribute("str", "test")
self.assertEqual(attr.name, "str")
self.assertEqual(attr.s, b"test")
checker.check_attribute(attr)
# unicode
attr = helper.make_attribute("str", u"test")
self.assertEqual(attr.name, "str")
self.assertEqual(attr.s, b"test")
checker.check_attribute(attr)
def test_attr_repeated_float(self): # type: () -> None
attr = helper.make_attribute("floats", [1.0, 2.0])
self.assertEqual(attr.name, "floats")
self.assertEqual(list(attr.floats), [1.0, 2.0])
checker.check_attribute(attr)
def test_attr_repeated_int(self): # type: () -> None
attr = helper.make_attribute("ints", [1, 2])
self.assertEqual(attr.name, "ints")
self.assertEqual(list(attr.ints), [1, 2])
checker.check_attribute(attr)
def test_attr_repeated_str(self): # type: () -> None
attr = helper.make_attribute("strings", ["str1", "str2"])
self.assertEqual(attr.name, "strings")
self.assertEqual(list(attr.strings), [b"str1", b"str2"])
checker.check_attribute(attr)
def test_attr_repeated_tensor_proto(self): # type: () -> None
tensors = [
helper.make_tensor(
name='a',
data_type=TensorProto.FLOAT,
dims=(1,),
vals=np.ones(1).tolist()
),
helper.make_tensor(
name='b',
data_type=TensorProto.FLOAT,
dims=(1,),
vals=np.ones(1).tolist()
)]
attr = helper.make_attribute("tensors", tensors)
self.assertEqual(attr.name, "tensors")
self.assertEqual(list(attr.tensors), tensors)
checker.check_attribute(attr)
def test_attr_repeated_graph_proto(self): # type: () -> None
graphs = [GraphProto(), GraphProto()]
graphs[0].name = "a"
graphs[1].name = "b"
attr = helper.make_attribute("graphs", graphs)
self.assertEqual(attr.name, "graphs")
self.assertEqual(list(attr.graphs), graphs)
checker.check_attribute(attr)
def test_is_attr_legal(self): # type: () -> None
# no name, no field
attr = AttributeProto()
self.assertRaises(checker.ValidationError, checker.check_attribute, attr)
# name, but no field
attr = AttributeProto()
attr.name = "test"
self.assertRaises(checker.ValidationError, checker.check_attribute, attr)
# name, with two fields
attr = AttributeProto()
attr.name = "test"
attr.f = 1.0
attr.i = 2
self.assertRaises(checker.ValidationError, checker.check_attribute, attr)
def test_is_attr_legal_verbose(self): # type: () -> None
def _set(attr, type, var, value): # type: (AttributeProto, AttributeProto.AttributeType, Text, Any) -> None
setattr(attr, var, value)
setattr(attr, 'type', type)
def _extend(attr, type, var, value): # type: (AttributeProto, AttributeProto.AttributeType, List[Any], Any) -> None
var.extend(value)
setattr(attr, 'type', type)
SET_ATTR = [
(lambda attr: _set(attr, AttributeProto.FLOAT, "f", 1.0)),
(lambda attr: _set(attr, AttributeProto.INT, "i", 1)),
(lambda attr: _set(attr, AttributeProto.STRING, "s", b"str")),
(lambda attr: _extend(attr, AttributeProto.FLOATS, attr.floats, [1.0, 2.0])),
(lambda attr: _extend(attr, AttributeProto.INTS, attr.ints, [1, 2])),
(lambda attr: _extend(attr, AttributeProto.STRINGS, attr.strings, [b"a", b"b"])),
]
# Randomly set one field, and the result should be legal.
for _i in range(100):
attr = AttributeProto()
attr.name = "test"
random.choice(SET_ATTR)(attr)
checker.check_attribute(attr)
# Randomly set two fields, and then ensure helper function catches it.
for _i in range(100):
attr = AttributeProto()
attr.name = "test"
for func in random.sample(SET_ATTR, 2):
func(attr)
self.assertRaises(checker.ValidationError,
checker.check_attribute,
attr)
class TestHelperNodeFunctions(unittest.TestCase):
def test_node_no_arg(self): # type: () -> None
self.assertTrue(defs.has("Relu"))
node_def = helper.make_node(
"Relu", ["X"], ["Y"], name="test")
self.assertEqual(node_def.op_type, "Relu")
self.assertEqual(node_def.name, "test")
self.assertEqual(list(node_def.input), ["X"])
self.assertEqual(list(node_def.output), ["Y"])
def test_attr_doc_string(self): # type: () -> None
node_def = helper.make_node(
"Relu", ["X"], ["Y"], name="test", doc_string="doc")
self.assertEqual(node_def.doc_string, "doc")
def test_node_with_arg(self): # type: () -> None
self.assertTrue(defs.has("Relu"))
# Note: Relu actually does not need an arg, but let's
# test it.
node_def = helper.make_node(
"Relu", ["X"], ["Y"],
arg_value=1)
self.assertEqual(node_def.op_type, "Relu")
self.assertEqual(list(node_def.input), ["X"])
self.assertEqual(list(node_def.output), ["Y"])
self.assertEqual(len(node_def.attribute), 1)
self.assertEqual(
node_def.attribute[0],
helper.make_attribute("arg_value", 1))
def test_graph(self): # type: () -> None
node_def1 = helper.make_node(
"Relu", ["X"], ["Y"])
node_def2 = helper.make_node(
"Add", ["X", "Y"], ["Z"])
value_info = [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])]
graph = helper.make_graph(
[node_def1, node_def2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])],
doc_string=None,
value_info=value_info,
)
self.assertEqual(graph.name, "test")
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0], node_def1)
self.assertEqual(graph.node[1], node_def2)
self.assertEqual(graph.doc_string, "")
self.assertEqual(graph.value_info[0], value_info[0])
def test_graph_docstring(self): # type: () -> None
graph = helper.make_graph([], "my graph", [], [], None, "my docs")
self.assertEqual(graph.name, "my graph")
self.assertEqual(graph.doc_string, "my docs")
def test_model(self): # type: () -> None
node_def = helper.make_node(
"Relu", ["X"], ["Y"])
graph_def = helper.make_graph(
[node_def],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
self.assertRaises(AttributeError, helper.make_model, graph_def, xxx=1)
model_def = helper.make_model(graph_def, producer_name='test')
self.assertEqual(model_def.producer_name, 'test')
def test_model_docstring(self): # type: () -> None
graph = helper.make_graph([], "my graph", [], [])
model_def = helper.make_model(graph, doc_string='test')
# models may have their own documentation, but don't have a name
# their name is the domain-qualified name of the underlying graph.
self.assertFalse(hasattr(model_def, "name"))
self.assertEqual(model_def.doc_string, 'test')
def test_model_metadata_props(self): # type: () -> None
graph = helper.make_graph([], "my graph", [], [])
model_def = helper.make_model(graph, doc_string='test')
helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
checker.check_model(model_def)
helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
checker.check_model(model_def) # helper replaces, so no dupe
dupe = model_def.metadata_props.add()
dupe.key = 'Title'
dupe.value = 'Other'
self.assertRaises(checker.ValidationError, checker.check_model, model_def)
def test_shape_denotation(self): # type: () -> None
shape_denotation = [DenotationConstProto().DATA_BATCH,
DenotationConstProto().DATA_CHANNEL,
DenotationConstProto().DATA_FEATURE,
DenotationConstProto().DATA_FEATURE]
tensor = helper.make_tensor_value_info("X",
TensorProto.FLOAT,
[2, 2, 2, 2],
shape_denotation=shape_denotation)
for i, d in enumerate(tensor.type.tensor_type.shape.dim):
self.assertEqual(d.denotation, shape_denotation[i])
class TestHelperTensorFunctions(unittest.TestCase):
def test_make_tensor(self): # type: () -> None
np_array = np.random.randn(2, 3).astype(np.float32)
tensor = helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=np_array.reshape(6).tolist()
)
self.assertEqual(tensor.name, 'test')
np.testing.assert_equal(np_array, numpy_helper.to_array(tensor))
# use raw_data field to store the data
tensor = helper.make_tensor(
name='test',
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=np_array.reshape(6).tobytes(),
raw=True,
)
np.testing.assert_equal(np_array, numpy_helper.to_array(tensor))
string_list = list(s.encode('ascii') for s in ['Amy', 'Billy', 'Cindy', 'David'])
tensor = helper.make_tensor(
name='test',
data_type=TensorProto.STRING,
dims=(2, 2),
vals=string_list,
raw=False
)
self.assertEqual(string_list, list(tensor.string_data))
def test_make_tensor_value_info(self): # type: () -> None
vi = helper.make_tensor_value_info('X', TensorProto.FLOAT, (2, 4))
checker.check_value_info(vi)
# scalar value
vi = helper.make_tensor_value_info('Y', TensorProto.FLOAT, ())
checker.check_value_info(vi)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_24065 | # encoding: UTF-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cmd
import csv
import os
import shutil
import string
import sys
import platform
import subprocess
import requests
from QUANTAXIS.QACmd.runner import run_backtest, run
from QUANTAXIS.QAApplication.QAAnalysis import QA_backtest_analysis_backtest
from QUANTAXIS.QAUtil import QA_util_log_info, QA_Setting, QA_util_mongo_initial
from QUANTAXIS.QASU.main import (
QA_SU_save_stock_list,
QA_SU_save_stock_min,
QA_SU_save_stock_xdxr,
QA_SU_save_stock_block,
QA_SU_save_stock_info,
QA_SU_save_stock_info_tushare,
QA_SU_save_stock_day,
QA_SU_save_index_day,
QA_SU_save_index_min,
QA_SU_save_future_list,
QA_SU_save_index_list,
QA_SU_save_etf_list,
QA_SU_save_etf_day,
QA_SU_save_etf_min,
QA_SU_save_financialfiles,
QA_SU_save_option_50etf_day,
QA_SU_save_option_50etf_min,
QA_SU_save_option_commodity_day,
QA_SU_save_option_commodity_min,
QA_SU_save_option_contract_list,
QA_SU_save_option_day_all,
QA_SU_save_option_min_all,
QA_SU_save_future_day,
QA_SU_save_future_min,
QA_SU_save_future_min_all,
QA_SU_save_future_day_all,
QA_SU_save_report_calendar_day,
QA_SU_save_report_calendar_his,
QA_SU_save_stock_divyield_day,
QA_SU_save_stock_divyield_his
)
from QUANTAXIS.QASU.save_binance import QA_SU_save_binance_symbol, QA_SU_save_binance_1hour, \
QA_SU_save_binance_1day, QA_SU_save_binance_1min, QA_SU_save_binance
from QUANTAXIS.QASU.save_bitmex import QA_SU_save_bitmex_symbol, QA_SU_save_bitmex
# Eastmoney (dongfang caifu) crawler
from QUANTAXIS.QASU.main import (QA_SU_crawl_eastmoney)
from QUANTAXIS import __version__
class CLI(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
        self.prompt = 'QUANTAXIS> '  # define the command-line prompt
def do_shell(self, arg):
"run a shell commad"
print(">", arg)
sub_cmd = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
print(sub_cmd.communicate()[0])
def do_version(self, arg):
QA_util_log_info(__version__)
def help_version(self):
print("syntax: version [message]",)
print("-- prints a version message")
# @click.command()
# @click.option('--e', default=1, help='Number of greetings.')
def do_examples(self, arg):
QA_util_log_info('QUANTAXIS example')
now_path = os.getcwd()
#project_dir = os.path.dirname(os.path.abspath(__file__))
data = requests.get(
'https://codeload.github.com/quantaxis/QADemo/zip/master'
)
with open("{}{}QADEMO.zip".format(now_path, os.sep), "wb") as code:
code.write(data.content)
QA_util_log_info(
'Successfully generate QADEMO in : {}, for more examples, please visit https://github.com/quantaxis/qademo'
.format(now_path)
)
def help_examples(self):
print('make a sample backtest framework')
def do_download_updatex(self, arg):
now_path = os.getcwd()
data = requests.get(
'https://raw.githubusercontent.com/QUANTAXIS/QUANTAXIS/master/config/update_x.py'
)
with open("{}{}update_x.py".format(now_path, os.sep), "wb") as code:
code.write(data.content)
def do_download_updateall(self, arg):
now_path = os.getcwd()
data = requests.get(
'https://raw.githubusercontent.com/QUANTAXIS/QUANTAXIS/master/config/update_all.py'
)
with open("{}{}update_all.py".format(now_path, os.sep), "wb") as code:
code.write(data.content)
def do_drop_database(self, arg):
QA_util_mongo_initial()
def help_drop_database(self):
print('drop quantaxis\'s databases')
    def do_quit(self, arg):  # define what the quit command does
sys.exit(1)
    def help_quit(self):  # define the help output for the quit command
print("syntax: quit",)
print("-- terminates the application")
def do_clean(self, arg):
try:
if platform.system() == 'Windows':
os.popen('del back*csv')
os.popen('del *log')
else:
os.popen('rm -rf back*csv')
os.popen('rm -rf *log')
except:
pass
def help_clean(self):
QA_util_log_info('Clean the old backtest reports and logs')
    def do_exit(self, arg):  # define what the exit command does
sys.exit(1)
def help_exit(self):
print('syntax: exit')
print("-- terminates the application")
def print_crawl_usage(self):
print(
"Usage: \n\
----------------------------------------------------------------------------------------------------------------------\n\
            ⌨️Command format: crawl eastmoney zjlx <6-digit stock code> : crawl Eastmoney money-flow data  ❤️Thanks to❤️ www.eastmoney.com for providing the data!\n\
            ⌨️Command format: crawl jrj zjlx <6-digit stock code>       : crawl JRJ money-flow data        ❤️Thanks to❤️ www.jrj.com.cn    for providing the data!\n\
            ⌨️Command format: crawl 10jqka funds <6-digit stock code>   : crawl 10jqka fund-flow data      ❤️Thanks to❤️ www.10jqka.com.cn for providing the data!\n\
-----------------------------------------------------------------------------------------------------------------------\n\
            ⌨️Command format: crawl eastmoney zjlx all    :    crawl money-flow data for all stocks from Eastmoney  ❤️Thanks to❤️ www.eastmoney.com for providing the data!\n\
            ⌨️Command format: crawl jrj zjlx all          :    crawl money-flow data for all stocks from JRJ        ❤️Thanks to❤️ www.jrj.com.cn    for providing the data!\n\
            ⌨️Command format: crawl 10jqka funds all      :    crawl fund-flow data for all stocks from 10jqka      ❤️Thanks to❤️ www.10jqka.com.cn for providing the data!\n\
-----------------------------------------------------------------------------------------------------------------------\n\
@yutiansut\n\
@QUANTAXIS\n\
            Please visit https://book.yutiansut.com/\n\
"
)
def do_crawl(self, arg):
if arg == '':
self.print_crawl_usage()
else:
arg = arg.split(' ')
if len(arg) == 3 and arg[0] == 'eastmoney' and arg[
1] == 'zjlx' and arg[2] != 'all':
                print(" Preparing to crawl Eastmoney money-flow data ")
QA_SU_crawl_eastmoney(action=arg[1], stockCode=arg[2])
elif len(arg) == 3 and arg[0] == 'jrj' and arg[
1] == 'zjlx' and arg[2] != 'all':
                print("❌crawl jrj zjlx XXXXXX is not implemented!")
elif len(arg) == 3 and arg[0] == '10jqka' and arg[
1] == 'funds' and arg[2] != 'all':
                print("❌crawl 10jqka funds XXXXXX is not implemented!")
elif len(arg) == 3 and arg[0] == 'eastmoney' and arg[
1] == 'zjlx' and arg[2] == 'all':
                # print("❌crawl eastmoney zjlx all is not implemented!")
                print(" Preparing to crawl Eastmoney money-flow data ")
QA_SU_crawl_eastmoney(action=arg[1], stockCode=arg[2])
elif len(arg) == 3 and arg[0] == 'jrj' and arg[1] == 'zjlx' and arg[
2] == 'all':
                print("❌crawl jrj zjlx all is not implemented!")
elif len(arg) == 3 and arg[0] == '10jqka' and arg[
1] == 'funds' and arg[2] == 'all':
                print("❌crawl 10jqka funds all is not implemented!")
else:
                print("❌crawl: incorrect command format!")
self.print_crawl_usage()
def print_save_usage(self):
print(
"Usage: \n\
            Command format: save all : save stock_day/xdxr/ index_day/ stock_list/index_list \n\
            Command format: save X|x : save stock_day/xdxr/min index_day/min etf_day/min stock_list/index_list/block \n\
            Command format: save day : save stock_day/xdxr index_day etf_day stock_list/index_list \n\
            Command format: save min : save stock_min/xdxr index_min etf_min stock_list/index_list \n\
            Command format: save future: save future_day/min/list \n\
            Command format: save ox: save option_contract_list/option_day/option_min/option_commodity_day/option_commodity_min \n\
------------------------------------------------------------ \n\
            Command format: save stock_day : save stock daily bars \n\
            Command format: save stock_xdxr : save ex-dividend/ex-rights (xdxr) data \n\
            Command format: save stock_min : save stock minute bars \n\
            Command format: save index_day : save index daily bars \n\
            Command format: save index_min : save index minute bars \n\
            Command format: save future_day : save futures daily bars \n\
            Command format: save future_min : save futures minute bars \n\
            Command format: save etf_day : save ETF daily bars \n\
            Command format: save etf_min : save ETF minute bars \n\
            Command format: save stock_list : save the stock list \n\
            Command format: save stock_block: save sector/block data \n\
            Command format: save stock_info : save the stock list fetched via the tushare data API \n\
            Command format: save financialfiles : save advanced financial data (since 1996) \n\
            Command format: save option_contract_list : save listed option contract info (excluding expired/delisted contracts)\n\
            Command format: save 50etf_option_day : save 50ETF option daily bars (excluding expired/delisted data) \n\
            Command format: save 50etf_option_min : save 50ETF option minute bars (excluding expired/delisted data) \n\
            Command format: save option_commodity_day : save commodity option daily bars (excluding expired/delisted data) \n\
            Command format: save option_commodity_min : save commodity option minute bars (excluding expired/delisted data) \n\
            Command format: save option_day_all : save all option daily bars (excluding expired/delisted data) \n\
            Command format: save option_min_all : save all option minute bars (excluding expired/delisted data) \n\
            Command format: save index_list : save the index list \n\
            Command format: save etf_list : save the ETF list \n\
            Command format: save future_list : save the futures list \n\
----------------------------------------------------------\n\
if you just want to save daily data just\n\
save all+ save stock_block+save stock_info, it about 1G data \n\
if you want to save save the fully data including min level \n\
save x + save stock_info \n \n\
@yutiansut\n\
@QUANTAXIS\n\
            Please visit https://book.yutiansut.com/\n\
"
)
def do_save(self, arg):
        # The user is inserted here only for initialization; to register users, do it under the webkit UI
if arg == '':
self.print_save_usage()
else:
arg = arg.split(' ')
if len(arg) == 1 and arg[0] == 'all':
if QA_Setting().client.quantaxis.user_list.find(
{'username': 'admin'}).count() == 0:
QA_Setting().client.quantaxis.user_list.insert(
{
'username': 'admin',
'password': 'admin'
}
)
                # TODO: pass 'ts' or 'tdx' in as a command argument
# QA_SU_save_stock_day('ts')
QA_SU_save_stock_day('tdx')
QA_SU_save_stock_xdxr('tdx')
# QA_SU_save_stock_min('tdx')
QA_SU_save_index_day('tdx')
# QA_SU_save_index_min('tdx')
QA_SU_save_etf_list('tdx')
# QA_SU_save_etf_day('tdx')
# QA_SU_save_etf_min('tdx')
QA_SU_save_index_list('tdx')
QA_SU_save_stock_list('tdx')
QA_SU_save_stock_block('tdx')
# QA_SU_save_stock_info('tdx')
# QA_SU_save_report_calendar_his()
# QA_SU_save_stock_divyield_his()
elif len(arg) == 1 and arg[0] == 'day':
if QA_Setting().client.quantaxis.user_list.find(
{'username': 'admin'}).count() == 0:
QA_Setting().client.quantaxis.user_list.insert(
{
'username': 'admin',
'password': 'admin'
}
)
QA_SU_save_stock_day('tdx')
QA_SU_save_stock_xdxr('tdx')
# QA_SU_save_stock_min('tdx')
QA_SU_save_index_day('tdx')
# QA_SU_save_index_min('tdx')
QA_SU_save_etf_list('tdx')
QA_SU_save_etf_day('tdx')
# QA_SU_save_etf_min('tdx')
QA_SU_save_index_list('tdx')
QA_SU_save_stock_list('tdx')
QA_SU_save_stock_block('tdx')
# QA_SU_save_stock_divyield_day()
# QA_SU_save_report_calendar_day()
elif len(arg) == 1 and arg[0] == 'min':
if QA_Setting().client.quantaxis.user_list.find(
{'username': 'admin'}).count() == 0:
QA_Setting().client.quantaxis.user_list.insert(
{
'username': 'admin',
'password': 'admin'
}
)
# QA_SU_save_stock_day('tdx')
QA_SU_save_stock_xdxr('tdx')
QA_SU_save_stock_min('tdx')
# QA_SU_save_index_day('tdx')
QA_SU_save_index_min('tdx')
QA_SU_save_etf_list('tdx')
# QA_SU_save_etf_day('tdx')
QA_SU_save_etf_min('tdx')
QA_SU_save_stock_list('tdx')
QA_SU_save_index_list('tdx')
# QA_SU_save_stock_block('tdx')
elif len(arg) == 1 and arg[0] in ['X', 'x']:
if QA_Setting().client.quantaxis.user_list.find(
{'username': 'admin'}).count() == 0:
QA_Setting().client.quantaxis.user_list.insert(
{
'username': 'admin',
'password': 'admin'
}
)
QA_SU_save_stock_day('tdx')
QA_SU_save_stock_xdxr('tdx')
QA_SU_save_stock_min('tdx')
QA_SU_save_index_day('tdx')
QA_SU_save_index_min('tdx')
QA_SU_save_etf_list('tdx')
QA_SU_save_etf_day('tdx')
QA_SU_save_etf_min('tdx')
QA_SU_save_stock_list('tdx')
QA_SU_save_index_list('tdx')
QA_SU_save_stock_block('tdx')
QA_SU_save_future_list('tdx')
# QA_SU_save_stock_info('tdx')
elif len(arg) == 1 and arg[0] == "binance":
QA_SU_save_binance_symbol()
                QA_SU_save_binance_1hour()
                QA_SU_save_binance_1day()
QA_SU_save_binance_1min()
elif len(arg) == 2 and arg[0] == "binance":
frequency = arg[1]
QA_SU_save_binance(frequency)
elif len(arg) == 1 and arg[0] == "bitmex":
QA_SU_save_bitmex_symbol()
QA_SU_save_bitmex('1m')
QA_SU_save_bitmex('1h')
QA_SU_save_bitmex('1d')
elif len(arg) == 1 and arg[0] == "huobi":
pass
elif len(arg) == 1 and arg[0] == "financialfiles":
QA_SU_save_financialfiles()
elif len(arg) == 1 and arg[0] == "future":
QA_SU_save_future_day('tdx')
QA_SU_save_future_min('tdx')
QA_SU_save_future_list('tdx')
elif len(arg) == 1 and arg[0] == "future_all":
QA_SU_save_future_day_all('tdx')
QA_SU_save_future_min_all('tdx')
QA_SU_save_future_list('tdx')
elif len(arg) == 1 and arg[0] == '50etf_option_day':
QA_SU_save_option_50etf_day('tdx')
elif len(arg) == 1 and arg[0] == '50etf_option_min':
QA_SU_save_option_50etf_min('tdx')
elif len(arg) == 1 and arg[0] == 'option_commodity_day':
QA_SU_save_option_commodity_day('tdx')
elif len(arg) == 1 and arg[0] == 'option_commodity_min':
QA_SU_save_option_commodity_min('tdx')
elif len(arg) == 1 and arg[0] in ['ox', 'OX', 'oX', 'Ox']:
QA_SU_save_option_contract_list('tdx')
QA_SU_save_option_50etf_day('tdx')
QA_SU_save_option_50etf_min('tdx')
QA_SU_save_option_commodity_day('tdx')
QA_SU_save_option_commodity_min('tdx')
else:
for i in arg:
if i == 'insert_user':
if QA_Setting().client.quantaxis.user_list.find(
{'username': 'admin'}).count() == 0:
QA_Setting().client.quantaxis.user_list.insert(
{
'username': 'admin',
'password': 'admin'
}
)
else:
try:
eval("QA_SU_save_%s('tdx')" % (i))
except:
                            print("❌Incorrect command format!")
self.print_save_usage()
def help_save(self):
QA_util_log_info('Save all the stock data from pytdx')
def do_fn(self, arg):
try:
QA_util_log_info(eval(arg))
        except Exception as e:
            print(e)
def do_help(self, arg):
QA_util_log_info("Possible commands are:")
QA_util_log_info("save")
QA_util_log_info("clean")
QA_util_log_info("fn")
QA_util_log_info("drop_database")
QA_util_log_info("examples")
QA_util_log_info("shell")
QA_util_log_info("version")
QA_util_log_info("quit")
QA_util_log_info("exit")
QA_util_log_info('MORE EXAMPLE on https://github.com/QUANTAXIS/QADemo')
def help(self):
QA_util_log_info('fn+methods name')
def do_ls(self, arg):
QA_util_log_info(os.path.dirname(os.path.abspath(__file__)))
def sourcecpy(src, des):
src = os.path.normpath(src)
des = os.path.normpath(des)
    if not os.path.exists(src) or not os.path.exists(des):
        print("folder does not exist")
sys.exit(1)
    # collect every entry in the source directory and build its absolute path
    os.chdir(src)
    src_file = [os.path.join(src, file) for file in os.listdir()]
    for source in src_file:
        # if it is a file
        if os.path.isfile(source):
            shutil.copy(source, des)  # first argument is a file, second is a directory
        # if it is a directory
        if os.path.isdir(source):
            p, src_name = os.path.split(source)
            des = os.path.join(des, src_name)
            shutil.copytree(source, des)  # both arguments are directories
# create a CLI instance and run it
def QA_cmd():
cli = CLI()
cli.cmdloop()
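# Illustrative usage (the `quantaxis` console entry point is an assumption
# about how the package is typically installed): start the interactive shell,
# then issue commands at the prompt, e.g.
#   $ quantaxis
#   QUANTAXIS> save day
#   QUANTAXIS> quit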
|
the-stack_106_24067 | """Test the binascii C module."""
from test import support
import unittest
import binascii
import array
# Note: "*_hex" functions are aliases for "(un)hexlify"
b2a_functions = ['b2a_base64', 'b2a_hex', 'b2a_hqx', 'b2a_qp', 'b2a_uu',
'hexlify', 'rlecode_hqx']
a2b_functions = ['a2b_base64', 'a2b_hex', 'a2b_hqx', 'a2b_qp', 'a2b_uu',
'unhexlify', 'rledecode_hqx']
all_functions = a2b_functions + b2a_functions + ['crc32', 'crc_hqx']
class BinASCIITest(unittest.TestCase):
type2test = bytes
# Create binary test data
rawdata = b"The quick brown fox jumps over the lazy dog.\r\n"
# Be slow so we don't depend on other modules
rawdata += bytes(range(256))
rawdata += b"\r\nHello world.\n"
def setUp(self):
self.data = self.type2test(self.rawdata)
def test_exceptions(self):
# Check module exceptions
self.assertTrue(issubclass(binascii.Error, Exception))
self.assertTrue(issubclass(binascii.Incomplete, Exception))
def test_functions(self):
# Check presence of all functions
for name in all_functions:
self.assertTrue(hasattr(getattr(binascii, name), '__call__'))
self.assertRaises(TypeError, getattr(binascii, name))
def test_returned_value(self):
# Limit to the minimum of all limits (b2a_uu)
MAX_ALL = 45
raw = self.rawdata[:MAX_ALL]
for fa, fb in zip(a2b_functions, b2a_functions):
a2b = getattr(binascii, fa)
b2a = getattr(binascii, fb)
try:
a = b2a(self.type2test(raw))
res = a2b(self.type2test(a))
except Exception as err:
self.fail("{}/{} conversion raises {!r}".format(fb, fa, err))
if fb == 'b2a_hqx':
# b2a_hqx returns a tuple
res, _ = res
self.assertEqual(res, raw, "{}/{} conversion: "
"{!r} != {!r}".format(fb, fa, res, raw))
self.assertIsInstance(res, bytes)
self.assertIsInstance(a, bytes)
self.assertLess(max(a), 128)
self.assertIsInstance(binascii.crc_hqx(raw, 0), int)
self.assertIsInstance(binascii.crc32(raw), int)
def test_base64valid(self):
# Test base64 with valid data
MAX_BASE64 = 57
lines = []
for i in range(0, len(self.rawdata), MAX_BASE64):
b = self.type2test(self.rawdata[i:i+MAX_BASE64])
a = binascii.b2a_base64(b)
lines.append(a)
res = bytes()
for line in lines:
a = self.type2test(line)
b = binascii.a2b_base64(a)
res += b
self.assertEqual(res, self.rawdata)
def test_base64invalid(self):
# Test base64 with random invalid characters sprinkled throughout
# (This requires a new version of binascii.)
MAX_BASE64 = 57
lines = []
for i in range(0, len(self.data), MAX_BASE64):
b = self.type2test(self.rawdata[i:i+MAX_BASE64])
a = binascii.b2a_base64(b)
lines.append(a)
fillers = bytearray()
valid = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
for i in range(256):
if i not in valid:
fillers.append(i)
def addnoise(line):
noise = fillers
ratio = len(line) // len(noise)
res = bytearray()
while line and noise:
if len(line) // len(noise) > ratio:
c, line = line[0], line[1:]
else:
c, noise = noise[0], noise[1:]
res.append(c)
return res + noise + line
res = bytearray()
for line in map(addnoise, lines):
a = self.type2test(line)
b = binascii.a2b_base64(a)
res += b
self.assertEqual(res, self.rawdata)
# Test base64 with just invalid characters, which should return
# empty strings. TBD: shouldn't it raise an exception instead ?
self.assertEqual(binascii.a2b_base64(self.type2test(fillers)), b'')
def test_uu(self):
MAX_UU = 45
lines = []
for i in range(0, len(self.data), MAX_UU):
b = self.type2test(self.rawdata[i:i+MAX_UU])
a = binascii.b2a_uu(b)
lines.append(a)
res = bytes()
for line in lines:
a = self.type2test(line)
b = binascii.a2b_uu(a)
res += b
self.assertEqual(res, self.rawdata)
self.assertEqual(binascii.a2b_uu(b"\x7f"), b"\x00"*31)
self.assertEqual(binascii.a2b_uu(b"\x80"), b"\x00"*32)
self.assertEqual(binascii.a2b_uu(b"\xff"), b"\x00"*31)
self.assertRaises(binascii.Error, binascii.a2b_uu, b"\xff\x00")
self.assertRaises(binascii.Error, binascii.a2b_uu, b"!!!!")
self.assertRaises(binascii.Error, binascii.b2a_uu, 46*b"!")
# Issue #7701 (crash on a pydebug build)
self.assertEqual(binascii.b2a_uu(b'x'), b'!> \n')
def test_crc32(self):
crc = binascii.crc32(self.type2test(b"Test the CRC-32 of"))
crc = binascii.crc32(self.type2test(b" this string."), crc)
self.assertEqual(crc, 1571220330)
self.assertRaises(TypeError, binascii.crc32)
def test_hqx(self):
# Perform binhex4 style RLE-compression
# Then calculate the hexbin4 binary-to-ASCII translation
rle = binascii.rlecode_hqx(self.data)
a = binascii.b2a_hqx(self.type2test(rle))
b, _ = binascii.a2b_hqx(self.type2test(a))
res = binascii.rledecode_hqx(b)
self.assertEqual(res, self.rawdata)
def test_hex(self):
# test hexlification
s = b'{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000'
t = binascii.b2a_hex(self.type2test(s))
u = binascii.a2b_hex(self.type2test(t))
self.assertEqual(s, u)
self.assertRaises(binascii.Error, binascii.a2b_hex, t[:-1])
self.assertRaises(binascii.Error, binascii.a2b_hex, t[:-1] + b'q')
self.assertEqual(binascii.hexlify(b'a'), b'61')
def test_qp(self):
# A test for SF bug 534347 (segfaults without the proper fix)
try:
binascii.a2b_qp(b"", **{1:1})
except TypeError:
pass
else:
self.fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError")
self.assertEqual(binascii.a2b_qp(b"= "), b"= ")
self.assertEqual(binascii.a2b_qp(b"=="), b"=")
self.assertEqual(binascii.a2b_qp(b"=AX"), b"=AX")
self.assertRaises(TypeError, binascii.b2a_qp, foo="bar")
self.assertEqual(binascii.a2b_qp(b"=00\r\n=00"), b"\x00\r\n\x00")
self.assertEqual(
binascii.b2a_qp(b"\xff\r\n\xff\n\xff"),
b"=FF\r\n=FF\r\n=FF")
self.assertEqual(
binascii.b2a_qp(b"0"*75+b"\xff\r\n\xff\r\n\xff"),
b"0"*75+b"=\r\n=FF\r\n=FF\r\n=FF")
self.assertEqual(binascii.b2a_qp(b'\0\n'), b'=00\n')
self.assertEqual(binascii.b2a_qp(b'\0\n', quotetabs=True), b'=00\n')
self.assertEqual(binascii.b2a_qp(b'foo\tbar\t\n'), b'foo\tbar=09\n')
self.assertEqual(binascii.b2a_qp(b'foo\tbar\t\n', quotetabs=True),
b'foo=09bar=09\n')
self.assertEqual(binascii.b2a_qp(b'.'), b'=2E')
self.assertEqual(binascii.b2a_qp(b'.\n'), b'=2E\n')
self.assertEqual(binascii.b2a_qp(b'a.\n'), b'a.\n')
def test_empty_string(self):
# A test for SF bug #1022953. Make sure SystemError is not raised.
empty = self.type2test(b'')
for func in all_functions:
if func == 'crc_hqx':
# crc_hqx needs 2 arguments
binascii.crc_hqx(empty, 0)
continue
f = getattr(binascii, func)
try:
f(empty)
except Exception as err:
self.fail("{}({!r}) raises {!r}".format(func, empty, err))
def test_unicode_b2a(self):
# Unicode strings are not accepted by b2a_* functions.
for func in set(all_functions) - set(a2b_functions) | {'rledecode_hqx'}:
try:
self.assertRaises(TypeError, getattr(binascii, func), "test")
except Exception as err:
self.fail('{}("test") raises {!r}'.format(func, err))
# crc_hqx needs 2 arguments
self.assertRaises(TypeError, binascii.crc_hqx, "test", 0)
def test_unicode_a2b(self):
# Unicode strings are accepted by a2b_* functions.
MAX_ALL = 45
raw = self.rawdata[:MAX_ALL]
for fa, fb in zip(a2b_functions, b2a_functions):
if fa == 'rledecode_hqx':
# Takes non-ASCII data
continue
a2b = getattr(binascii, fa)
b2a = getattr(binascii, fb)
try:
a = b2a(self.type2test(raw))
binary_res = a2b(a)
a = a.decode('ascii')
res = a2b(a)
except Exception as err:
self.fail("{}/{} conversion raises {!r}".format(fb, fa, err))
if fb == 'b2a_hqx':
# b2a_hqx returns a tuple
res, _ = res
binary_res, _ = binary_res
self.assertEqual(res, raw, "{}/{} conversion: "
"{!r} != {!r}".format(fb, fa, res, raw))
self.assertEqual(res, binary_res)
self.assertIsInstance(res, bytes)
# non-ASCII string
self.assertRaises(ValueError, a2b, "\x80")
class ArrayBinASCIITest(BinASCIITest):
def type2test(self, s):
return array.array('B', list(s))
class BytearrayBinASCIITest(BinASCIITest):
type2test = bytearray
class MemoryviewBinASCIITest(BinASCIITest):
type2test = memoryview
def test_main():
support.run_unittest(BinASCIITest,
ArrayBinASCIITest,
BytearrayBinASCIITest,
MemoryviewBinASCIITest)
if __name__ == "__main__":
test_main()
|
the-stack_106_24068 | from __future__ import annotations
import numpy as np
from typing import Union, List, Tuple
class Calibration:
"""
A class containing calibration information for spectra or Q-matrices.
This class handles all matters relating to calibrations, energy binning etc.
Attributes
----------
n_channels : int
Number of channels
coefficients : Array[length = 3]
Calibration coefficients
label : str
Label for the energy scale, used for plotting. Default is "Energy, keV".
Methods
-------
is_calibrated -> bool
Property which returns True if calibration coefficients provided.
channel_numbers -> np.ndarray
Property which returns a 1D array of channel numbers for the calibration.
energies -> np.ndarray
Property which returns an array of energy values for the lower limits of each energy bin, if the calibration
coefficients are supplied, otherwise it returns the channel numbers.
bin_widths -> np.ndarray or None.
Property which returns an array of bin widths in energy units if calibration coefficients are supplied,
otherwise it returns None.
lower_bin_limits -> np.ndarray or None
Property which returns an array of energy values for the lower limits of each energy bin, if the calibration
coefficients are supplied, otherwise it returns None.
upper_bin_limits -> np.ndarray or None
Property which returns an array of energy values for the upper limits of each energy bin, if the calibration
coefficients are supplied, otherwise it returns None.
all_bin_limits -> np.ndarray or None
Property which returns an array of energy values for the upper and lower limits of each energy bin, if the
calibration coefficients are supplied, otherwise it returns None.
centre_bin_energies -> np.ndarray or None
Property which returns an array of energy values for the centre of each energy bin, if the calibration
coefficients are supplied, otherwise it returns None.
find_channel(energy: float) -> int or None
Method which returns the channel number in which a deposit of energy 'energy' would be counted. If calibration
coefficients are not provided, or if the energy is outside the bounds of the energy scale, this method returns
None.
energy_mask(lower: float, upper: float) -> np.ndarray or None
Method which returns a boolean array which can be applied as a boolean mask on arrays of the same length. The
'lower' and 'upper' parameters set the lower and upper limits on the energy scale for which the mask is True.
If no calibration coefficients are supplied, this method returns None.
from_energies(energies: np.ndarray) -> Calibration
Class method which returns a Calibration instance calibrated on the energy values in the 'energy' array.
channel_numbers_only(n_channels: int) -> Calibration
Class method which returns a Calibration instance without calibration coefficients, with label equal to
'Channel #'
"""
def __init__(
self,
n_channels: int = 1024,
coefficients: Union[np.ndarray, Tuple[float, float, float], List[float], None] = None,
label: Union[str, None] = None
):
"""
Parameters
----------
n_channels : int
Number of channels, default = 1024.
coefficients : Tuple[float, float, float]
offset, linear and quadratic calibration coefficients which define the lower energy bound of each channel
label : Optional[str]
Label for the energy scale. Used when plotting. Default is "Energy, keV". Use this if energy is in different
units.
"""
self.n_channels = n_channels
self._is_calibrated = coefficients is not None
if self.is_calibrated:
self.coefficients: Union[np.ndarray, None] = np.array(coefficients)
else:
self.coefficients: Union[np.ndarray, None] = None
self.label = label if label is not None else 'Energy, keV'
@property
def is_calibrated(self) -> bool:
"""
Property which returns True if calibration coefficients provided.
Returns
-------
bool
"""
return self._is_calibrated
@property
def channel_numbers(self) -> np.ndarray:
"""
Property which returns a 1D array of channel numbers for the calibration.
Returns
-------
np.ndarray
"""
return np.arange(self.n_channels)
@property
def bin_widths(self) -> Union[np.ndarray, None]:
"""
Property which returns an array of bin widths in energy units if calibration coefficients are supplied,
otherwise it returns None.
Returns
-------
np.ndarray or None
"""
if self.is_calibrated:
return self.upper_bin_limits - self.lower_bin_limits
else:
return None
@property
def lower_bin_limits(self) -> Union[np.ndarray, None]:
"""
Property which returns an array of energy values for the lower limits of each energy bin, if the calibration
coefficients are supplied, otherwise it returns None.
Returns
-------
np.ndarray or None
"""
if self.is_calibrated:
return self.all_bin_limits[:-1]
else:
return None
@property
def upper_bin_limits(self) -> Union[np.ndarray, None]:
"""
Property which returns an array of energy values for the upper limits of each energy bin, if the calibration
coefficients are supplied, otherwise it returns None.
Returns
-------
np.ndarray or None
"""
if self.is_calibrated:
return self.all_bin_limits[1:]
else:
return None
@property
def all_bin_limits(self) -> Union[np.ndarray, None]:
"""
Property which returns an array of energy values for the upper and lower limits of each energy bin, if the
calibration coefficients are supplied, otherwise it returns None.
Returns
-------
np.ndarray or None
"""
if self.is_calibrated:
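            # Evaluate the calibration polynomial E(ch) = c0 + c1*ch + c2*ch**2
            # at each of the n_channels + 1 bin boundaries via a powers matrix.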
channels = np.arange(0., self.n_channels + 1)
powers = np.power(channels[:, np.newaxis], np.arange(3)[np.newaxis, :])
return (powers * self.coefficients[np.newaxis, :]).sum(axis=1)
else:
return None
@property
def centre_bin_energies(self) -> Union[np.ndarray, None]:
"""
Property which returns an array of energy values for the centre of each energy bin, if the calibration
coefficients are supplied, otherwise it returns None.
Returns
-------
np.ndarray or None
"""
if self.is_calibrated:
return (self.lower_bin_limits + self.upper_bin_limits) / 2.
else:
return None
@property
def energies(self) -> np.ndarray:
"""
Property which returns an array of energy values for the lower limits of each energy bin, if the calibration
coefficients are supplied, otherwise it returns the channel numbers.
Returns
-------
np.ndarray
"""
if self.is_calibrated:
return self.lower_bin_limits
else:
return self.channel_numbers
def find_channel(self, energy: float) -> Union[int, None]:
"""
Method which returns the channel number in which a deposit of energy 'energy' would be counted. If calibration
coefficients are not provided, or if the energy is outside the bounds of the energy scale, this method returns
None.
Parameters
----------
energy : float
Energy in the same units as the calibration coefficients.
Returns
-------
int or None
"""
if self.is_calibrated:
arr = np.arange(self.n_channels)[np.logical_and(self.lower_bin_limits <= energy,
self.upper_bin_limits > energy)]
if arr.size == 0:
return None
else:
return int(arr[0])
else:
return None
def energy_mask(self, lower: float = None, upper: float = None) -> Union[np.ndarray, None]:
"""
Method which returns a boolean array which can be applied as a boolean mask on arrays of the same length. The
'lower' and 'upper' parameters set the lower and upper limits on the energy scale for which the mask is True.
If no calibration coefficients are supplied, this method returns None.
Parameters
----------
lower : float or None
Lower energy limit for the boolean mask
upper : float or None
Upper energy limit for the boolean mask
Returns
-------
np.ndarray
"""
if self.is_calibrated:
mask = np.ones(self.n_channels, dtype=bool)
if upper is not None:
mask = np.logical_and(mask, self.energies < upper)
if lower is not None:
mask = np.logical_and(mask, self.energies >= lower)
return mask
else:
return None
    def __eq__(self, other: Calibration) -> bool:
        if self.coefficients is None or other.coefficients is None:
            return self.coefficients is other.coefficients and self.n_channels == other.n_channels
        return np.isclose(self.coefficients, other.coefficients).all() and self.n_channels == other.n_channels
@classmethod
def from_energies(cls, energies: np.ndarray, bin_label: str = 'lower') -> Calibration:
"""
Class method which returns a Calibration instance calibrated on the energy values in the 'energy' array.
Parameters
----------
energies : np.ndarray
Array of energy values from which to create a calibration object.
bin_label : str
Either 'upper', 'middle' or 'lower', specifying where in each bin the energy array is labelling.
"""
upper = bin_label == 'upper'
middle = bin_label == 'middle'
lower = bin_label == 'lower'
assert isinstance(energies, np.ndarray)
        if not (upper or middle or lower):
            raise AssertionError(f"'bin_label' keyword argument to Classes.Calibration must be either 'upper', "
                                 f"'middle', or 'lower'. You gave '{bin_label}'")
channels = energies.size
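        # A constant gradient means the supplied energy scale is linear; otherwise fit a
        # quadratic channel->energy polynomial and store coefficients as (offset, linear, quadratic).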
grad = np.gradient(energies)
linear = np.array_equal(grad[1:], grad[:-1])
if middle:
energies -= grad/2.
elif upper:
energies -= grad
if linear:
coefficients = np.array([energies[0], grad[0], 0])
else:
coefficients = np.polyfit(np.arange(channels), energies, 2)[::-1]
return cls(channels, coefficients)
@classmethod
def channel_numbers_only(cls, n_channels: int) -> Calibration:
"""
Class method which returns a Calibration instance without calibration coefficients, with label equal to
'Channel #'
Parameters
----------
n_channels : int
Number of channels.
Returns
-------
Calibration
"""
return cls(n_channels=n_channels, label='Channel #')
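# Minimal usage sketch; the channel count and calibration coefficients below are
# made-up illustrative values (zero offset, roughly 3 keV per channel, no quadratic term).
if __name__ == '__main__':
    cal = Calibration(n_channels=1024, coefficients=(0.0, 3.0, 0.0))
    print(cal.is_calibrated)             # True
    print(cal.centre_bin_energies[:3])   # [1.5 4.5 7.5]
    print(cal.find_channel(100.0))       # channel whose bin contains a 100 keV deposit -> 33
    mask = cal.energy_mask(lower=50.0, upper=500.0)
    print(int(mask.sum()))               # number of channels whose lower edge lies in [50, 500)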
|
the-stack_106_24069 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
import aria2p
from asyncio import sleep
from subprocess import PIPE, Popen
from userbot import LOGS, CMD_HELP
from userbot.events import register
from requests import get
def subprocess_run(cmd):
subproc = Popen(cmd, stdout=PIPE, stderr=PIPE,
shell=True, universal_newlines=True)
talk = subproc.communicate()
exitCode = subproc.returncode
if exitCode != 0:
return
return talk
# Get best trackers for improved download speeds, thanks K-E-N-W-A-Y.
trackers_list = get(
'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_best.txt'
).text.replace('\n\n', ',')
trackers = f"[{trackers_list}]"
cmd = f"aria2c \
--enable-rpc \
--rpc-listen-all=false \
--rpc-listen-port 6800 \
--max-connection-per-server=10 \
--rpc-max-request-size=1024M \
--seed-time=0.01 \
--max-upload-limit=5K \
--max-concurrent-downloads=5 \
--min-split-size=10M \
--follow-torrent=mem \
--split=10 \
--bt-tracker={trackers} \
--daemon=true \
--allow-overwrite=true"
aria2_is_running = subprocess_run(cmd)
aria2 = aria2p.API(aria2p.Client(host="http://localhost", port=6800,
secret=""))
@register(outgoing=True, pattern="^.amag(?: |$)(.*)")
async def magnet_download(event):
magnet_uri = event.pattern_match.group(1)
# Add Magnet URI Into Queue
try:
download = aria2.add_magnet(magnet_uri)
except Exception as e:
LOGS.info(str(e))
return await event.edit("Error:\n`" + str(e) + "`")
gid = download.gid
await check_progress_for_dl(gid=gid, event=event, previous=None)
await sleep(5)
new_gid = await check_metadata(gid)
await check_progress_for_dl(gid=new_gid, event=event, previous=None)
@register(outgoing=True, pattern="^.ator(?: |$)(.*)")
async def torrent_download(event):
torrent_file_path = event.pattern_match.group(1)
# Add Torrent Into Queue
try:
download = aria2.add_torrent(torrent_file_path,
uris=None,
options=None,
position=None)
except Exception as e:
return await event.edit(str(e))
gid = download.gid
await check_progress_for_dl(gid=gid, event=event, previous=None)
@register(outgoing=True, pattern="^.aurl(?: |$)(.*)")
async def aurl_download(event):
uri = [event.pattern_match.group(1)]
try: # Add URL Into Queue
download = aria2.add_uris(uri, options=None, position=None)
except Exception as e:
LOGS.info(str(e))
return await event.edit("Error :\n`{}`".format(str(e)))
gid = download.gid
await check_progress_for_dl(gid=gid, event=event, previous=None)
file = aria2.get_download(gid)
if file.followed_by_ids:
new_gid = await check_metadata(gid)
await check_progress_for_dl(gid=new_gid, event=event, previous=None)
@register(outgoing=True, pattern="^.aclear(?: |$)(.*)")
async def remove_all(event):
    removed = False
    try:
        removed = aria2.remove_all(force=True)
        aria2.purge_all()
    except Exception:
        pass
    if not removed:  # If the API returns False, try to remove through a system call.
subprocess_run("aria2p remove-all")
await event.edit("`Clearing on-going downloads... `")
await sleep(2.5)
await event.edit("`Successfully cleared all downloads.`")
await sleep(2.5)
@register(outgoing=True, pattern="^.apause(?: |$)(.*)")
async def pause_all(event):
# Pause ALL Currently Running Downloads.
await event.edit("`Pausing downloads...`")
aria2.pause_all(force=True)
await sleep(2.5)
await event.edit("`Successfully paused on-going downloads.`")
await sleep(2.5)
@register(outgoing=True, pattern="^.aresume(?: |$)(.*)")
async def resume_all(event):
await event.edit("`Resuming downloads...`")
aria2.resume_all()
await sleep(1)
await event.edit("`Downloads resumed.`")
await sleep(2.5)
await event.delete()
@register(outgoing=True, pattern="^.ashow(?: |$)(.*)")
async def show_all(event):
output = "output.txt"
downloads = aria2.get_downloads()
msg = ""
for download in downloads:
msg = msg + "File: `" + str(download.name) + "`\nSpeed: " + str(
download.download_speed_string()) + "\nProgress: " + str(
download.progress_string()) + "\nTotal Size: " + str(
download.total_length_string()) + "\nStatus: " + str(
download.status) + "\nETA: " + str(
download.eta_string()) + "\n\n"
if len(msg) <= 4096:
await event.edit("`On-going Downloads: `\n" + msg)
await sleep(5)
await event.delete()
else:
await event.edit("`Output is too big, sending it as a file...`")
with open(output, 'w') as f:
f.write(msg)
await sleep(2)
await event.delete()
await event.client.send_file(
event.chat_id,
output,
force_document=True,
supports_streaming=False,
allow_cache=False,
reply_to=event.message.id,
)
async def check_metadata(gid):
file = aria2.get_download(gid)
new_gid = file.followed_by_ids[0]
    LOGS.info("Changing GID " + gid + " to " + new_gid)
return new_gid
async def check_progress_for_dl(gid, event, previous):
complete = None
while not complete:
file = aria2.get_download(gid)
complete = file.is_complete
try:
if not complete and not file.error_message:
msg = f"\nDownloading File Name:\n`{file.name}`\n\n"
msg += f"`Status`\n**{file.status.capitalize()}**\n"
msg += f"`Speed :` {file.download_speed_string()}\n"
msg += f"`Progress :` {file.progress_string()}\n"
msg += f"`Total Size :` {file.total_length_string()}\n"
msg += f"`ETA :` {file.eta_string()}\n"
                # Only edit the status message when its text has changed,
                # to avoid redundant Telegram edits.
                if msg != previous:
                    await event.edit(msg)
                    previous = msg
await sleep(5)
await check_progress_for_dl(gid, event, previous)
file = aria2.get_download(gid)
complete = file.is_complete
if complete:
return await event.edit(f"`{file.name}`\n\n"
"Successfully downloaded...")
except Exception as e:
if " not found" in str(e) or "'file'" in str(e):
await event.edit("Download Canceled :\n`{}`".format(file.name))
await sleep(2.5)
return await event.delete()
elif " depth exceeded" in str(e):
file.remove(force=True)
await event.edit(
"Download Auto Canceled :\n`{}`\nYour Torrent/Link is Dead."
.format(file.name))
CMD_HELP.update({
"aria":
".aurl [URL] (or) .amag [Magnet Link] (or) .ator [path to torrent file]\
\nUsage: Downloads the file into your userbot server storage.\
\n\n.apause (or) .aresume\
\nUsage: Pauses/resumes on-going downloads.\
\n\n.aclear\
\nUsage: Clears the download queue, deleting all on-going downloads.\
\n\n.ashow\
\nUsage: Shows progress of the on-going downloads."
})
|
the-stack_106_24071 | import json
import unittest
import urllib.request
from urllib.error import URLError
from bs4 import BeautifulSoup
from django import template
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.urls import reverse
from mock import patch
from wagtail.core import blocks
from wagtail.core.rich_text import expand_db_html
from wagtail.embeds import oembed_providers
from wagtail.embeds.blocks import EmbedBlock, EmbedValue
from wagtail.embeds.embeds import get_embed
from wagtail.embeds.exceptions import EmbedNotFoundException, EmbedUnsupportedProviderException
from wagtail.embeds.finders import get_finders
from wagtail.embeds.finders.embedly import EmbedlyFinder as EmbedlyFinder
from wagtail.embeds.finders.embedly import AccessDeniedEmbedlyException, EmbedlyException
from wagtail.embeds.finders.oembed import OEmbedFinder as OEmbedFinder
from wagtail.embeds.models import Embed
from wagtail.embeds.rich_text import MediaEmbedHandler, media_embedtype_handler
from wagtail.embeds.templatetags.wagtailembeds_tags import embed_tag
from wagtail.tests.utils import WagtailTestUtils
try:
import embedly # noqa
no_embedly = False
except ImportError:
no_embedly = True
class TestGetFinders(TestCase):
def test_defaults_to_oembed(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], OEmbedFinder)
# New WAGTAILEMBEDS_FINDERS setting
@override_settings(WAGTAILEMBEDS_FINDERS=[
{
'class': 'wagtail.embeds.finders.oembed'
}
])
def test_new_find_oembed(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], OEmbedFinder)
@override_settings(WAGTAILEMBEDS_FINDERS=[
{
'class': 'wagtail.embeds.finders.embedly',
'key': 'foo',
}
])
def test_new_find_embedly(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], EmbedlyFinder)
self.assertEqual(finders[0].get_key(), 'foo')
@override_settings(WAGTAILEMBEDS_FINDERS=[
{
'class': 'wagtail.embeds.finders.oembed',
'options': {'foo': 'bar'}
}
])
def test_new_find_oembed_with_options(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], OEmbedFinder)
self.assertEqual(finders[0].options, {'foo': 'bar'})
class TestEmbeds(TestCase):
def setUp(self):
self.hit_count = 0
def dummy_finder(self, url, max_width=None):
# Up hit count
self.hit_count += 1
# Return a pretend record
return {
'title': "Test: " + url,
'type': 'video',
'thumbnail_url': '',
'width': max_width if max_width else 640,
'height': 480,
'html': "<p>Blah blah blah</p>",
}
def test_get_embed(self):
embed = get_embed('www.test.com/1234', max_width=400, finder=self.dummy_finder)
# Check that the embed is correct
self.assertEqual(embed.title, "Test: www.test.com/1234")
self.assertEqual(embed.type, 'video')
self.assertEqual(embed.width, 400)
# Check ratio calculations
self.assertEqual(embed.ratio, 480 / 400)
self.assertEqual(embed.ratio_css, '120.0%')
self.assertTrue(embed.is_responsive)
# Check that there has only been one hit to the backend
self.assertEqual(self.hit_count, 1)
# Look for the same embed again and check the hit count hasn't increased
embed = get_embed('www.test.com/1234', max_width=400, finder=self.dummy_finder)
self.assertEqual(self.hit_count, 1)
# Look for a different embed, hit count should increase
embed = get_embed('www.test.com/4321', max_width=400, finder=self.dummy_finder)
self.assertEqual(self.hit_count, 2)
# Look for the same embed with a different width, this should also increase hit count
embed = get_embed('www.test.com/4321', finder=self.dummy_finder)
self.assertEqual(self.hit_count, 3)
def dummy_finder_invalid_width(self, url, max_width=None):
# Return a record with an invalid width
return {
'title': "Test: " + url,
'type': 'video',
'thumbnail_url': '',
'width': '100%',
'height': 480,
'html': "<p>Blah blah blah</p>",
}
def test_invalid_width(self):
embed = get_embed('www.test.com/1234', max_width=400, finder=self.dummy_finder_invalid_width)
# Width must be set to None
self.assertEqual(embed.width, None)
def test_no_html(self):
def no_html_finder(url, max_width=None):
"""
A finder which returns everything but HTML
"""
embed = self.dummy_finder(url, max_width)
embed['html'] = None
return embed
embed = get_embed('www.test.com/1234', max_width=400, finder=no_html_finder)
self.assertEqual(embed.html, '')
@override_settings(WAGTAILEMBEDS_FINDERS=[])
def test_no_finders_available(self):
with self.assertRaises(EmbedUnsupportedProviderException):
get_embed('www.test.com/1234', max_width=400)
class TestChooser(TestCase, WagtailTestUtils):
def setUp(self):
# login
self.login()
def test_chooser(self):
r = self.client.get('/admin/embeds/chooser/')
self.assertEqual(r.status_code, 200)
def test_chooser_with_edit_params(self):
r = self.client.get('/admin/embeds/chooser/?url=http://example2.com')
self.assertEqual(r.status_code, 200)
response_json = json.loads(r.content.decode())
self.assertEqual(response_json['step'], 'chooser')
self.assertIn('value="http://example2.com"', response_json['html'])
@patch('wagtail.embeds.embeds.get_embed')
def test_submit_valid_embed(self, get_embed):
get_embed.return_value = Embed(html='<img src="http://www.example.com" />', title="An example embed")
response = self.client.post(reverse('wagtailembeds:chooser_upload'), {
'url': 'http://www.example.com/'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'embed_chosen')
self.assertEqual(response_json['embed_data']['title'], "An example embed")
@patch('wagtail.embeds.embeds.get_embed')
def test_submit_unrecognised_embed(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
response = self.client.post(reverse('wagtailembeds:chooser_upload'), {
'url': 'http://www.example.com/'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'chooser')
self.assertIn("Cannot find an embed for this URL.", response_json['html'])
class TestEmbedly(TestCase):
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_oembed_called_with_correct_arguments(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com'}
EmbedlyFinder(key='foo').find_embed('http://www.example.com')
oembed.assert_called_with('http://www.example.com', better=False)
EmbedlyFinder(key='foo').find_embed('http://www.example.com', max_width=100)
oembed.assert_called_with('http://www.example.com', maxwidth=100, better=False)
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_401(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 401}
self.assertRaises(AccessDeniedEmbedlyException,
EmbedlyFinder(key='foo').find_embed, 'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_403(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 403}
self.assertRaises(AccessDeniedEmbedlyException,
EmbedlyFinder(key='foo').find_embed, 'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_404(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 404}
self.assertRaises(EmbedNotFoundException,
EmbedlyFinder(key='foo').find_embed, 'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_other_error(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 999}
self.assertRaises(EmbedlyException, EmbedlyFinder(key='foo').find_embed,
'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_html_conversion(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result['html'], '<img src="http://www.example.com" />')
oembed.return_value = {'type': 'something else',
'html': '<foo>bar</foo>'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result['html'], '<foo>bar</foo>')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_return_value(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'something else',
'html': '<foo>bar</foo>'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result, {
'title': '',
'author_name': '',
'provider_name': '',
'type': 'something else',
'thumbnail_url': None,
'width': None,
'height': None,
'html': '<foo>bar</foo>'})
oembed.return_value = {'type': 'something else',
'author_name': 'Alice',
'provider_name': 'Bob',
'title': 'foo',
'thumbnail_url': 'http://www.example.com',
'width': 100,
'height': 100,
'html': '<foo>bar</foo>'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result, {'type': 'something else',
'author_name': 'Alice',
'provider_name': 'Bob',
'title': 'foo',
'thumbnail_url': 'http://www.example.com',
'width': 100,
'height': 100,
'html': '<foo>bar</foo>'})
class TestOembed(TestCase):
def setUp(self):
class DummyResponse:
def read(self):
return b"foo"
self.dummy_response = DummyResponse()
def test_oembed_invalid_provider(self):
self.assertRaises(EmbedNotFoundException, OEmbedFinder().find_embed, "foo")
def test_oembed_invalid_request(self):
config = {'side_effect': URLError('foo')}
with patch.object(urllib.request, 'urlopen', **config):
self.assertRaises(EmbedNotFoundException, OEmbedFinder().find_embed,
"http://www.youtube.com/watch/")
@patch('urllib.request.urlopen')
@patch('json.loads')
def test_oembed_photo_request(self, loads, urlopen):
urlopen.return_value = self.dummy_response
loads.return_value = {'type': 'photo',
'url': 'http://www.example.com'}
result = OEmbedFinder().find_embed("http://www.youtube.com/watch/")
self.assertEqual(result['type'], 'photo')
self.assertEqual(result['html'], '<img src="http://www.example.com" />')
loads.assert_called_with("foo")
@patch('urllib.request.urlopen')
@patch('json.loads')
def test_oembed_return_values(self, loads, urlopen):
urlopen.return_value = self.dummy_response
loads.return_value = {
'type': 'something',
'url': 'http://www.example.com',
'title': 'test_title',
'author_name': 'test_author',
'provider_name': 'test_provider_name',
            'thumbnail_url': 'test_thumbnail_url',
'width': 'test_width',
'height': 'test_height',
'html': 'test_html'
}
result = OEmbedFinder().find_embed("http://www.youtube.com/watch/")
self.assertEqual(result, {
'type': 'something',
'title': 'test_title',
'author_name': 'test_author',
'provider_name': 'test_provider_name',
            'thumbnail_url': 'test_thumbnail_url',
'width': 'test_width',
'height': 'test_height',
'html': 'test_html'
})
def test_oembed_accepts_known_provider(self):
finder = OEmbedFinder(providers=[oembed_providers.youtube])
self.assertTrue(finder.accept("http://www.youtube.com/watch/"))
def test_oembed_doesnt_accept_unknown_provider(self):
finder = OEmbedFinder(providers=[oembed_providers.twitter])
self.assertFalse(finder.accept("http://www.youtube.com/watch/"))
@patch('urllib.request.urlopen')
@patch('json.loads')
def test_endpoint_with_format_param(self, loads, urlopen):
urlopen.return_value = self.dummy_response
loads.return_value = {'type': 'video',
'url': 'http://www.example.com'}
result = OEmbedFinder().find_embed("https://vimeo.com/217403396")
self.assertEqual(result['type'], 'video')
request = urlopen.call_args[0][0]
self.assertEqual(request.get_full_url().split('?')[0], "http://www.vimeo.com/api/oembed.json")
class TestEmbedTag(TestCase):
@patch('wagtail.embeds.embeds.get_embed')
def test_direct_call(self, get_embed):
get_embed.return_value = Embed(html='<img src="http://www.example.com" />')
result = embed_tag('http://www.youtube.com/watch/')
self.assertEqual(result, '<img src="http://www.example.com" />')
@patch('wagtail.embeds.embeds.get_embed')
def test_call_from_template(self, get_embed):
get_embed.return_value = Embed(html='<img src="http://www.example.com" />')
temp = template.Template('{% load wagtailembeds_tags %}{% embed "http://www.youtube.com/watch/" %}')
result = temp.render(template.Context())
self.assertEqual(result, '<img src="http://www.example.com" />')
@patch('wagtail.embeds.embeds.get_embed')
def test_catches_embed_not_found(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
temp = template.Template('{% load wagtailembeds_tags %}{% embed "http://www.youtube.com/watch/" %}')
result = temp.render(template.Context())
self.assertEqual(result, '')
class TestEmbedBlock(TestCase):
def test_deserialize(self):
"""
Deserialising the JSONish value of an EmbedBlock (a URL) should give us an EmbedValue
for that URL
"""
block = EmbedBlock(required=False)
block_val = block.to_python('http://www.example.com/foo')
self.assertIsInstance(block_val, EmbedValue)
self.assertEqual(block_val.url, 'http://www.example.com/foo')
# empty values should yield None
empty_block_val = block.to_python('')
self.assertEqual(empty_block_val, None)
def test_serialize(self):
block = EmbedBlock(required=False)
block_val = EmbedValue('http://www.example.com/foo')
serialized_val = block.get_prep_value(block_val)
self.assertEqual(serialized_val, 'http://www.example.com/foo')
serialized_empty_val = block.get_prep_value(None)
self.assertEqual(serialized_empty_val, '')
@patch('wagtail.embeds.embeds.get_embed')
def test_render(self, get_embed):
get_embed.return_value = Embed(html='<h1>Hello world!</h1>')
block = EmbedBlock()
block_val = block.to_python('http://www.example.com/foo')
temp = template.Template('embed: {{ embed }}')
context = template.Context({'embed': block_val})
result = temp.render(context)
# Check that the embed was in the returned HTML
self.assertIn('<h1>Hello world!</h1>', result)
# Check that get_embed was called correctly
get_embed.assert_any_call('http://www.example.com/foo')
@patch('wagtail.embeds.embeds.get_embed')
def test_render_within_structblock(self, get_embed):
"""
When rendering the value of an EmbedBlock directly in a template
(as happens when accessing it as a child of a StructBlock), the
proper embed output should be rendered, not the URL.
"""
get_embed.return_value = Embed(html='<h1>Hello world!</h1>')
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('embed', EmbedBlock()),
])
block_val = block.to_python({'title': 'A test', 'embed': 'http://www.example.com/foo'})
temp = template.Template('embed: {{ self.embed }}')
context = template.Context({'self': block_val})
result = temp.render(context)
self.assertIn('<h1>Hello world!</h1>', result)
# Check that get_embed was called correctly
get_embed.assert_any_call('http://www.example.com/foo')
def test_render_form(self):
"""
The form field for an EmbedBlock should be a text input containing
the URL
"""
block = EmbedBlock()
form_html = block.render_form(EmbedValue('http://www.example.com/foo'), prefix='myembed')
self.assertIn('<input ', form_html)
self.assertIn('value="http://www.example.com/foo"', form_html)
def test_value_from_form(self):
"""
EmbedBlock should be able to turn a URL submitted as part of a form
back into an EmbedValue
"""
block = EmbedBlock(required=False)
block_val = block.value_from_datadict({'myembed': 'http://www.example.com/foo'}, {}, prefix='myembed')
self.assertIsInstance(block_val, EmbedValue)
self.assertEqual(block_val.url, 'http://www.example.com/foo')
# empty value should result in None
empty_val = block.value_from_datadict({'myembed': ''}, {}, prefix='myembed')
self.assertEqual(empty_val, None)
def test_default(self):
block1 = EmbedBlock()
self.assertEqual(block1.get_default(), None)
block2 = EmbedBlock(default='')
self.assertEqual(block2.get_default(), None)
block3 = EmbedBlock(default=None)
self.assertEqual(block3.get_default(), None)
block4 = EmbedBlock(default='http://www.example.com/foo')
self.assertIsInstance(block4.get_default(), EmbedValue)
self.assertEqual(block4.get_default().url, 'http://www.example.com/foo')
block5 = EmbedBlock(default=EmbedValue('http://www.example.com/foo'))
self.assertIsInstance(block5.get_default(), EmbedValue)
self.assertEqual(block5.get_default().url, 'http://www.example.com/foo')
def test_clean_required(self):
block = EmbedBlock()
cleaned_value = block.clean(
EmbedValue('https://www.youtube.com/watch?v=_U79Wc965vw'))
self.assertIsInstance(cleaned_value, EmbedValue)
self.assertEqual(cleaned_value.url,
'https://www.youtube.com/watch?v=_U79Wc965vw')
with self.assertRaisesMessage(ValidationError, ''):
block.clean(None)
def test_clean_non_required(self):
block = EmbedBlock(required=False)
cleaned_value = block.clean(
EmbedValue('https://www.youtube.com/watch?v=_U79Wc965vw'))
self.assertIsInstance(cleaned_value, EmbedValue)
self.assertEqual(cleaned_value.url,
'https://www.youtube.com/watch?v=_U79Wc965vw')
cleaned_value = block.clean(None)
self.assertIsNone(cleaned_value)
def test_clean_invalid_url(self):
non_required_block = EmbedBlock(required=False)
with self.assertRaises(ValidationError):
non_required_block.clean(
EmbedValue('http://no-oembed-here.com/something'))
required_block = EmbedBlock()
with self.assertRaises(ValidationError):
required_block.clean(
EmbedValue('http://no-oembed-here.com/something'))
class TestMediaEmbedHandler(TestCase):
def test_get_db_attributes(self):
soup = BeautifulSoup('<b data-url="test-url">foo</b>', 'html5lib')
tag = soup.b
result = MediaEmbedHandler.get_db_attributes(tag)
self.assertEqual(result,
{'url': 'test-url'})
@patch('wagtail.embeds.embeds.get_embed')
def test_expand_db_attributes_for_editor(self, get_embed):
get_embed.return_value = Embed(
url='http://www.youtube.com/watch/',
max_width=None,
type='video',
html='test html',
title='test title',
author_name='test author name',
provider_name='test provider name',
thumbnail_url='http://test/thumbnail.url',
width=1000,
height=1000,
)
result = MediaEmbedHandler.expand_db_attributes(
{'url': 'http://www.youtube.com/watch/'}
)
self.assertIn(
(
'<div class="embed-placeholder" contenteditable="false" data-embedtype="media"'
' data-url="http://www.youtube.com/watch/">'
),
result
)
self.assertIn('<h3>test title</h3>', result)
self.assertIn('<p>URL: http://www.youtube.com/watch/</p>', result)
self.assertIn('<p>Provider: test provider name</p>', result)
self.assertIn('<p>Author: test author name</p>', result)
self.assertIn('<img src="http://test/thumbnail.url" alt="test title">', result)
@patch('wagtail.embeds.embeds.get_embed')
    def test_expand_db_attributes_for_editor_catches_embed_not_found(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
result = MediaEmbedHandler.expand_db_attributes(
{'url': 'http://www.youtube.com/watch/'},
)
self.assertEqual(result, '')
@patch('wagtail.embeds.embeds.get_embed')
def test_expand_db_attributes(self, get_embed):
get_embed.return_value = Embed(
url='http://www.youtube.com/watch/',
max_width=None,
type='video',
html='test html',
title='test title',
author_name='test author name',
provider_name='test provider name',
            thumbnail_url='http://test/thumbnail.url',
width=1000,
height=1000,
)
result = media_embedtype_handler(
{'url': 'http://www.youtube.com/watch/'}
)
self.assertIn('test html', result)
@patch('wagtail.embeds.embeds.get_embed')
def test_expand_db_attributes_catches_embed_not_found(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
result = media_embedtype_handler(
{'url': 'http://www.youtube.com/watch/'}
)
self.assertEqual(result, '')
@patch('wagtail.embeds.embeds.get_embed')
def test_expand_html_escaping_end_to_end(self, get_embed):
get_embed.return_value = Embed(
url='http://www.youtube.com/watch/',
max_width=None,
type='video',
html='test html',
title='test title',
author_name='test author name',
provider_name='test provider name',
            thumbnail_url='http://test/thumbnail.url',
width=1000,
height=1000,
)
result = expand_db_html('<p>1 2 <embed embedtype="media" url="https://www.youtube.com/watch?v=O7D-1RG-VRk&t=25" /> 3 4</p>')
self.assertIn('test html', result)
get_embed.assert_called_with('https://www.youtube.com/watch?v=O7D-1RG-VRk&t=25')
|
the-stack_106_24075 | import pytest
import eff
def test_init():
e = eff.Effects(print=print)
assert e.print is print
def test_short_alias():
assert eff.ects is eff.Effects
e = eff.ects(print=print)
assert e.print is print
def test_no_attr():
e = eff.Effects()
with pytest.raises(AttributeError):
e.print
def test_context():
with eff.Effects(print=print) as e:
assert e.print is print
def test_context_class_access():
with eff.Effects(print=print):
assert eff.Effects.print is print
def test_context_no_suppress():
with pytest.raises(ZeroDivisionError):
with eff.Effects(print=print):
raise ZeroDivisionError
def test_deep_context():
sent1 = object()
sent2 = object()
with eff.Effects(sent=sent1) as e1:
assert e1.sent is sent1
assert eff.Effects.sent is sent1
with eff.Effects(sent=sent2) as e2:
assert e1.sent is sent1
assert e2.sent is sent2
assert eff.Effects.sent is sent2
assert e1.sent is sent1
assert eff.Effects.sent is sent1
def test_two_contexts():
class E1(eff.Effects):
pass
class E2(eff.Effects):
pass
sent1 = object()
sent2 = object()
with E1(sent=sent1) as e1, E2(sent=sent2) as e2:
assert isinstance(e1, E1)
assert isinstance(e2, E2)
assert e1.sent is sent1
assert e2.sent is sent2
assert E1.sent is sent1
assert E2.sent is sent2
|