the-stack_106_21324
import os.path
import sys
from libs.pascalVocIO import PascalVocWriter
from base64 import b64encode, b64decode


class LabelFileError(Exception):
    pass


class LabelFile(object):
    # It might be changed as window creates
    suffix = '.lif'

    def __init__(self, filename=None):
        self.shapes = ()
        self.imagePath = None
        self.imageData = None
        if filename is not None:
            self.load(filename)

    def savePascalVocFormat(
            self,
            savefilename,
            image_size,
            shapes,
            imagePath=None,
            databaseSrc=None,
            shape_type_='RECT'):
        imgFolderPath = os.path.dirname(imagePath)  # directory containing the image file
        imgFolderName = os.path.split(imgFolderPath)[-1]  # name of that directory
        imgFileName = os.path.basename(imagePath)  # file name, including extension
        imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]  # file name without extension
        print('imgFolderPath:', imgFolderPath)
        print('imgFolderName:', imgFolderName)
        print('imgFileName:', imgFileName)
        print('imgFileNameWithoutExt:', imgFileNameWithoutExt)
        # img = cv2.imread(imagePath)
        writer = PascalVocWriter(
            imgFolderName,
            imgFileNameWithoutExt,
            image_size,
            localImgPath=imagePath,
            shape_type=shape_type_)
        bSave = False
        for shape in shapes:
            points = shape['points']
            label = shape['label']
            if shape['shape_type'] == 0:
                # Detection mode: build a bounding box from the points.
                bndbox = LabelFile.convertPoints2BndBox(points)
                writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, shape['difficult'])
            if shape['shape_type'] == 1:
                # Segmentation mode: add the polygon points.
                writer.addPolygon(points, label, instance_id=shape['instance_id'], ignore=shape['difficult'])
            bSave = True
        print('label savefilename:', savefilename)
        if bSave:
            writer.save(targetFile=savefilename)  # write the annotation file here
        return

    @staticmethod
    def isLabelFile(filename):
        fileSuffix = os.path.splitext(filename)[1].lower()
        return fileSuffix == LabelFile.suffix

    @staticmethod
    def convertPoints2BndBox(points):
        xmin = sys.maxsize
        ymin = sys.maxsize
        xmax = -sys.maxsize
        ymax = -sys.maxsize
        for p in points:
            x = p[0]
            y = p[1]
            xmin = min(x, xmin)
            ymin = min(y, ymin)
            xmax = max(x, xmax)
            ymax = max(y, ymax)
        # Martin Kersner, 2015/11/12
        # 0-valued coordinates of BB caused an error while
        # training faster-rcnn object detector.
        if xmin < 1:
            xmin = 1
        if ymin < 1:
            ymin = 1
        return (int(xmin), int(ymin), int(xmax), int(ymax))
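
# --------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original module):
# a minimal, hypothetical call to savePascalVocFormat with one rectangular
# annotation. The shape-dictionary keys and the PascalVocWriter call pattern
# follow the code above; the concrete file names and the image_size layout
# are assumed for illustration only.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    demo_shapes = [{
        'points': [(10, 20), (110, 20), (110, 220), (10, 220)],
        'label': 'person',
        'shape_type': 0,   # 0 = detection (bounding box), 1 = segmentation
        'difficult': 0,
        'instance_id': 0,
    }]
    LabelFile().savePascalVocFormat(
        'demo.xml',            # hypothetical target annotation file
        (480, 640, 3),         # assumed (height, width, depth) tuple
        demo_shapes,
        imagePath='images/demo.jpg')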
the-stack_106_21325
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn, num_parallel_calls=num_parallel_calls)
.prefetch(output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(
components, count, num_parallel_calls, output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_parallel_calls_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (dataset_ops.Dataset.range(10)
.map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
class MapDatasetBenchmark(test.Benchmark):
def benchmarkChainOfMaps(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset chain length: %d Median wall time: %f"
% (chain_length, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_chain_latency_%d" % chain_length)
def benchmarkMapFanOut(self):
fan_outs = [1, 2, 5, 10, 20, 50, 100]
for fan_out in fan_outs:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(
tuple(0 for _ in range(fan_out))).repeat(None).map(lambda *xs: xs)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element[0].op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element[0].op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset fan out: %d Median wall time: %f"
% (fan_out, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_fan_out_%d" % fan_out)
if __name__ == "__main__":
test.main()
the-stack_106_21328
# โโโ TASKS โโโ
# For Sharon:
# make sure all msgs you want are included (15 min intro for example)
# For Tami:
# make sure messages early in the day are sent in the morning (send one by one)
# send msgs with url as attachments with preview
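
# --------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the original schedule):
# one way a sender script might dispatch a single day's entries in time
# order. `send_text` and `send_media` are hypothetical callables supplied
# by whatever messaging layer is used; only the schedule layout below
# (day -> {"HH:MM:SS": message-or-media-path}) is taken from this file.
# --------------------------------------------------------------------------
def dispatch_day(schedule, day, send_text, send_media):
    """Send every entry scheduled for `day`, sorted by its HH:MM:SS key."""
    for timestamp in sorted(schedule.get(day, {})):
        payload = schedule[day][timestamp]
        if payload.strip().endswith(('.png', '.opus')):
            send_media(payload.strip())   # image or voice-note attachment
        else:
            send_text(payload)            # plain text message
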
international = {
# ''' DAY -5 ''' 5 days before the challenge
-5:{
"12:00:00": "image/C18/pre5.png"
,
"12:00:05":
'''
Hello everyone, I am surprised and happy with the number of participants and hope that this process will contribute to your growth and peace of mind in such a challenging period ๐๐ธ๐๐๐โค๏ธ๐ปโ๏ธ๐๐๐๐
'''
,
},
# ''' DAY -4 ''' 4 days before the challenge
-4:{
"12:00:00": "image/C18/pre4.png"
,
"12:00:05":
'''
It excites me that more change agents continue to join us โค๏ธ We will start in 4 days - at 20:00 GMT+3.
The 18 Days of Sustainability and Leadership Challenge is a social group process inspired by the SDGโs โ Sustainable Development Goals, aimed at evolving and changing the reality in which we live.
This is a 10-20 minute short daily task, opening us up to a magical process of growth and new awareness, which will disconnect you from the immediate comfort zone and open up global awareness in you.
You are giving yourself an amazing gift and you will be happy about it.
Remember this is not only for you but for the community that you serve.
I most appreciate your participation, good will and patience,
May we all have a magical day full of sustainability and leadership ๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ
I am here for any question / help you need in private
To your success,
With playful regards,
Sharon Gal-Or
๐๐ธ๐
'''
,
},
# ''' DAY -3 ''' 3 days before the challenge
-3:{
"12:00:00": "image/C18/pre3.png"
,
"12:00:05":
'''
Hello everyone, I am surprised and happy with the number of participants and hope that this process will contribute to your growth and peace of mind in such a challenging period ๐๐ธ๐๐๐โค๏ธ๐ปโ๏ธ๐๐๐๐
'''
,
},
# ''' DAY -2 ''' 2 days before the challenge
-2:{
"12:00:00": "image/C18/pre2.png"
,
"12:00:05":
'''
To ensure that people stay in the group and go through the process, and so that you do not have to spend a long time looking for the task, I ask you to respond only about completing the tasks ... Those who need explanations - I would be very happy to answer in detail.
You are giving yourself an amazing gift and you will be happy about it,
May we all have a magical day full of Sustainability & Leadership ๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ
For those of you who are interested, you are welcome to join another group where we will share the personal experiences at the following group:
Challenge18 Think Tank-1 group for sharing ideas and asking questions ๐๐ธ๐
https://chat.whatsapp.com/LLGHvVQ7Z5oAKJR329CeDt
'''
,
},
# ''' DAY -1 ''' one day before the challenge
-1:{
"12:00:00": "image/C18/pre1.png"
,
"12:00:05":
'''
I know for some of you this is not the first time , but it is still very important to do the exercises.
I am sure that in light of the changing situation in the world, the awareness of performing the exercises will be different.
This is a time when inner fears often float, so I guess on some days some of you will experience less pleasant moments. I have no doubt that maintaining a high level of awareness through performing the exercises will allow us to cope more easily๐ธ
Starting tonight, and every day, I will send the tasks to be completed the next day:
1. Personal exercise for writing in a notebook (have a colorful notebook that will smile at you).
2. Background on the Global Goals with addition of extra flavor.
The process requires loyalty to yourself and the group so commit to yourself and the group. *At the end of the practice, mark the number of the task/s (1-3 if you chose to do more than one task) and a flower or a heart emoji ๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ โค๏ธโค๏ธโค๏ธโค๏ธโค๏ธ and for the next 18 days I will ask that this is all you will respond in the group*.
As always, anyone who needs support or explanations is welcome to send messages in private
May we all have an amazing and growing process of Sustainability & Leadership โ Sharon Gal-Or
'''
,
"12:00:11":
'''
I am surprised and happy with the number of participants and I hope that this process will contribute to your growth and peace of mind in such challenging times โค๏ธ
'''
,
},
# ''' DAY 0 ''' the challenge starting day
0:{
"12:00:00": "image/C18/pre0.png"
,
"12:00:05":
'''
Dear and sustaining friends
For those unfamiliar with this amazing journey named Challenge 18 - I will give you some information:
*This is an 18-day process, in which you are sent 1-3 short daily tasks inspired by the SDG's โ sustainable development goals, and alongside them, a basic background on the goals. Each evening I will send the task of the next day, and you will have 24 hours to perform and confirm in the group you did. Your confirmation is the day/goal number and next to it one or more emojis, according to the task/s you have chosen to perform.*
For example: a heart in any color you choose or a 'green' environmental emoji of your choice. The score is according to the task number, so the maximum number accumulates from day to day.
For example:
Task 1 = one point and the mark is โค๏ธ
Task 2 = two points and the mark is โค๏ธโค๏ธ or an emoji chosen to represent the task
Task 3 = three points and the mark is โค๏ธโค๏ธโค๏ธ or an emoji chosen to represent the task.
For example, per the 1st day the maximum score is 6 hearts/flowers โค๏ธโค๏ธโค๏ธโค๏ธโค๏ธโค๏ธ - this is to the extent that you have chosen to perform all three tasks.
*The score is cumulative throughout the challenge, both in your individual journey and for the whole group, so the commitment of each and every one of you is to yourself and the whole group. good luck.*
Since we are a large group, it may be a bit challenging, but I am sure you will be able to contain it, because you are amazing and sustainable and that you really care about our planet and future generations ๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ
I encourage you to be committed to the process. Since I do not believe in top-down management here, I would be happy for those who do not connect to leave the group so that the overall energy remains of doing and growing
Thank you all for the opportunity to give you from the heart โค๏ธ Anyone who needs my help with something, please contact me in private
'''
,
"12:00:10":
'''
Morning everybody,
Feel free to invite more change agents you think they would benefit from joining us.
Further clarification friends,
Every day at 20:00, the daily task will be sent to you, according to 1-3 commitment and difficulty levels. Choose one of them, with which you feel comfortable and you feel committed and connected to the process - you have 24 hours to do it and mark in the group you did it... *there is no need to be especially available every day at 20:00* ๐โฝ๐๐๐ฑ๐ธ
'''
,
"12:00:15":
'''
Hello everyone, it excites me that people continue to join us โค๏ธ
I remind you that today at 20:00 we will begin the amazing process of Sustainability & Leadership for each and every one of you ...
I encourage you to be committed to the process. Whoever is not committed in his heart to this process is welcome to leave the group with great joy ๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ
A stunning day for everyone
'''
,
"12:00:20":
'''
I know for some of you this is not the first time, but it is still very important to do the exercises.
I am sure that in light of the changing situation in the world, the awareness of performing the exercises will be different.
This is a time when inner fears often float, so I guess on some days some of you will experience less pleasant moments. I have no doubt that maintaining a high level of awareness through performing the exercises will allow us to cope more easily๐ธ
Starting tonight, and every day, I will send the tasks to be completed the next day:
1. Personal exercise for writing in a notebook (have a colorful notebook that will smile at you).
2. Background on the Global Goals with addition of extra flavor.
The process requires loyalty to yourself and the group so commit to yourself and the group. *At the end of the practice, mark the number of the task/s (1-3 if you chose to do more than one task) and a flower or a heart emoji ๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ โค๏ธโค๏ธโค๏ธโค๏ธโค๏ธ and for the next 18 days I will ask that this is all you will respond in the group*.
As always, anyone who needs support or explanations is welcome to send messages in private
May we all have an amazing and growing process of Sustainability & Leadership โ Sharon Gal-Or
'''
,
"12:00:25":
'''
Our change journey, of all humanity, depends on our ability to communicate in large numbers quickly and accurately in favor of 'fixing the world' and maintaining harmony.
I sincerely hope that Challenge 18 will help you expand your knowledge and imagination, assimilate the values of sustainability, and encourage you to become 'change agents'.
So congratulations, I give you my blessings, today you are starting the Ting Global 18 Days of Sustainability & Leadership challenge that can change the reality in which we live and save humanity, if only we choose to change our habits.
How?
We will start with daily practice ... towards a new reality that awaits you, your family, your community and future generations.
'''
,
},
# ''' DAY 1 '''
1:{
"19:59:50": "image/C18/1.png"
,
"19:59:57":
'''
Hello everyone,
We start ..... I wish you all good luck ๐ป and that you will get the most out of this magical process.
*Lesson 1 - "Good begets Good" - On Spiritual Wealth and Human Wealth*
Today's task according to 1-3 levels of commitment โ choose at least one of them, no matter what, the main thing is that you will feel comfortable and feel committed and connected to the process.
Day 1 Tasks:
1. *Watch the TED talk* - Poverty isn't a lack of character; it's a lack of cash | Rutger Bregman & reflect on what you saw.
https://youtu.be/ydKcaIE6O1k
2. *I have it - I have it* - a list of 18 things you already have in life.
3. *48 people* who have added value to your life - In the notebook you have chosen for the process, create a list of people who have added value to your life.
Write down everyone who is positive and who in your perception made you grow in some way.
The list must be complete according to the level of commitment you have chosen. It is important to remember that even people who seemingly did you "bad" made you grow. (While doing this, you will understand with yourself what the reason is for choosing them on the list). Proceed calmly, remembering the good things in each person, and what he / she has brought into your life.
Enjoy this activity!
''',
"20:00:07":
'''
*Welcome to the first day of the 18 days of Sustainability & Leadership challenge - NO POVERTY.*
Did you know that almost half of the world's population lives in poverty worldwide โ and that there are over a billion people - who still live on $ 1.25 a day or less, and the number has risen since the corona.
Should we blame those poor children who are born into such a reality?
& how those children will grow up knowing they will never be able to escape their poverty?
*What does this even mean? What does it tell us about our future? And what can be done?*
According to the SDGโs, the Sustainable Development Goals set by the UN member states, goal number 1 is to reduce by half by 2030, the relative percentage of the poor in the world. And in your country? What goal did you commit to? What goal did we commit to? What employment opportunities are available to those of us living in poverty? What services and products exist for those of us living in poverty?
Since the outbreak of the corona plague, the economic situation of many citizens around the world has been so difficult that they are sometimes forced to give up one of the most basic human needs - food.
Eliminating poverty is not a task of charity, but an act of justice.
*What can you do now to fight poverty?*
*And now for spiritual wealth.*
We have compiled a list of people who have added value to our lives and focused on what we have in life. *What is spiritual wealth and what does it have to do with real wealth?* We can understand this when we think of our family, a family that supports each other when needed. When your brother or sister achieves success, the whole family celebrates. At the deepest spiritual level, we are connected not only to our parents and siblings but to the whole world, as we grow in this awareness, we live in oneness, celebrating the success of all. The human family.
Spend some time practicing unity, be present and be frequently interested in the lives of others when you talk to them. In your conversations, give your full attention and if you see someone in need, offer help.
Sharpening your observation of your surroundings and doing these things will help you develop a deeper understanding of true relationships.
The main thought for today:
*"Who is a rich man? He who is happy with his share".*
'''
,
"20:00:17": "image/C18/1.opus"
,
# ''' DAY 2 '''
"09:31:00":
'''
Gooooooood morning ladies and gentlemen!
I remind you that the marking of the task(s) is open until tonight, 20:00
The score depends on the number of task(s) you have selected
And it accumulates both in the individual journey and in the group ๐๐ป๐ถ๐ต
I encourage all of you who have performed to mark: 1โค๏ธ and raise the morale of the group and of yourselves, as life is a meaningful and powerful journey - never give up ๐๐ธ๐
'''
,
"09:31:10":
'''
๐ป๐ผ๐ธ๐บ๐น๐ท๐๐พ
you are all welcome to join the community group where we share personal experiences and talk about whatever is in our hearts about making the world a better place:
Challenge18 Think Tank-1 group for sharing ideas and asking questions ๐๐ธ๐
https://chat.whatsapp.com/LLGHvVQ7Z5oAKJR329CeDt
'''
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
# ''' DAY 2 '''
2:{
"20:00:00": "image/C18/2.png"
,
"20:00:05":
'''
Task for the second day - ZERO HUNGER ๐ ๐ ๐
Choose one of three tasks, no matter which one, the main thing is that you feel comfortable, committed and connected to the process. Challenge yourself to eat differently.
1. Watch the TED talk - What we're getting wrong in the fight to end hunger | Jasmine Crowe | & reflect on what you saw. Then, go to the group's telephone list and send another group member your thoughts from the video and, more generally, on poverty and hunger in your city/country and what can be done.
https://youtu.be/VasJyDmMafA
2. Avoid sweets - Choose not to eat sweets (Cakes, candies, soft drinks etc.) - from today until the end of the challenge, (at the end of the practice, mark the day number and ๐ 2 every day from now until the end of the challenge).
3. VegaNation Challenge yourself to eat vegan or vegetarian from today until the end of the challenge (at the end of the practice, mark the day number and ๐ 2 each day from now until the end of the challenge).
The main thing is that you maintain a better diet than you are used to. What do you say, can you?
After successfully completing the task until tomorrow at 20:00, type ๐ or ๐ 2 in the group and each day from now until the end of the challenge.
Remember: the more you allow yourself in these processes, the more you will get for your personal growth.
Continue an evening full of Sustainability & Leadership ๐ซ ๐ ๐ซ
''',
"20:00:08":
'''
*Welcome to the second day of the challenge - ZERO HUNGER*
We want more, consume more and throw away more, and not just food. What does this say about us?
If we promote sustainable agriculture with modern technologies and fair distribution systems, we will be able to sustain the entire population of the world and make sure that no one will ever suffer from hunger.
According to the SDGโs, goal number 2 is - by 2030, to end hunger and ensure access for all human beings - especially the poor, children and people in vulnerable situations - to safe, nutritious and satisfying food throughout the world.
What is being done to stop the huge waste of food and make sure it reaches those who need it? Our planet has provided us with tremendous resources, but unequal access and ineffective care leave millions of people suffering from malnutrition.
*What can you change now in your food consumption habits? *
*One small change for you in your food consumption habits, one giant change for humanity.*
It is important to know that it is possible otherwise, that we can imagine and act for a better future - for a spiritual abundance that will replace the material and the extra waste.
The main thought for today: *"Who is a rich man? He who shares".*
'''
,
"20:00:13": "image/C18/2.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 3
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 3 '''
3:{
"20:00:00": "image/C18/3.png"
,
"20:00:05":
'''
Good evening and welcome to the 3rd day of sustainability and leadership practice ๐ธ๐ธ๐ธ๐ธ
People are leaving and it's natural so praise yourself for being still here ๐๐๐๐๐๐
I am personally proud of each and every one of you
๐ ๐ ๐
Regarding today's mission
Task for the third day ๐ โฝ ๐ - Good Health & Well Being - Choose one of the three tasks, the main thing is that you feel comfortable with it, committed and connected to the process.
1. Watch the TED talk - Nurse Innovation: Saving the Future of Healthcare | Rebecca Love | TEDxBeaconStreet | & reflect on what you saw.
https://youtu.be/IPBcRW8NQPY
2. Happy life - Make a list (this time the number does not matter) of the people who, in your opinion, are considered prosperous and healthy, in your family or in your social environment. People who have achieved their life goals (all or part of them) and are happy.
3. Plank It - From today until the end of the challenge, practice planking for at least a minute every day (at the end of the exercise, mark the day number and โฝ 3 every day from now until the end of the challenge)
After successfully completing the task by tomorrow at 20:00, type ๐ 3 in the group and each day from now until the end of the challenge.
Remember: The more you allow yourself within these processes, the more you will get for your personal growth๐ โฝ ๐
Continue an evening full of Sustainability & Leadership
''',
"20:00:08":
'''
An important clarification regarding emojis and numbers.
The number represents the day in the challenge
The emoji represents a task type
โค๏ธ / ๐ป Represents a one-time daily task
Repetitive tasks:
To eat vegan ๐
Avoid sweets ๐
Plank-it โฝ
Example: The person who chose the challenge to avoid sweets from the second day until the end of the challenge marks in the group ๐ every day. If you also chose the plank-it challenge on the third day, then in addition, mark it every day.
'''
,
"20:00:10":
'''
Welcome to the third day of the challenge - GOOD HEALTH & WELL BEING
"Health is a basic human right" (International Health Organization), and a key indicator of sustainable development.
Women all over the world still lack access to medical care related to sex and fertility; Billions of people are left without access to essential medicines, millions of adults and children will suffer from malnutrition this year, and some estimate that the amount of waste will triple this year. This will have serious health effects. In addition, thousands of new cases of the corona plague continue to happen every day, leading to political and economic instability, threatening the livelihoods of many of the worldโs inhabitants. True, the Corona vaccine has not yet been found, development is under way, but think - who do you think will be the first to benefit from it?
What can you do now to become healthier, body, mind and spirit?
The main thought for today: a healthy mind in a healthy body in a healthy world
'''
,
"20:00:13": "image/C18/3.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 4
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 4 '''
4:{
"20:00:00": "image/C18/4.png"
,
"20:00:05":
'''
Good evening everyone ๐ *Welcome to the fourth day of the challenge - quality education*
Choose one of three tasks, the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the TED talk* - Linda Cliatt-Wayman: How to fix a broken school? Lead fearlessly, love hard | & reflect on what you saw.
https://youtu.be/Xe2nlti47kA
2. *Traveling books* - Write two ideas you read in a book on sustainability and send them to a friend in the challenge group. โค๏ธโค๏ธ My recommendation to you is to read the fantasy book: 'The Journey to the Kingdom of Ludoland'.
3. *Thank you letter* - Write a letter to the teacher (teacher for life) that influenced you, in which you will express all your feelings, the feelings you have. Write everything you want. You are of course welcome to share with the group what you have written. Feel free to send the letter to the same teacher and watch the magic take place. Feel free to share here.
May you all have a day full of Sustainability & Leadership ๐๐๐
โค๏ธ๐๐๐๐๐๐๐โค๏ธ
''',
"20:00:08":
'''
*Welcome to the fourth day of the challenge - QUALITY EDUCATION*
What if we teach our children how to think and not what to think?
What if we teach our children to question life, to question all?
What if we teach our children to realize their dreams?
Education is a basic human right, and is essential for achieving sustainable development. We can empower women, fight climate change, fight inequality, eradicate extreme poverty and more - only if we, the citizens, including the education system itself, commit to advancing education-related goals.
Goal #4 in the Global Goals states that by 2030, all boys and girls will complete primary and secondary education provided free of charge.
*What can you do now to assure quality education in your community?*
Everyone wants to be heard and no one is listening. The corona crisis has helped us understand that without the dominant involvement of society, which will influence decision-making politicians, there is no chance for humanity to survive on Earth. We can learn from crises, and especially from listening to others. Thus, the main thought for today is: *I listen to the other, I listen as I care.*
'''
,
"20:00:13": "image/C18/4.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 5
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 5 '''
5:{
"20:00:00": "image/C18/5.png"
,
"20:00:05":
'''
This task has arrived today for us to enjoy the weekend so you have time and peace of mind to concentrate properly .... I already feel that this group is special ๐
A magical weekend to all and please continue to celebrate *Sustainability & Leadership*
๐ธ๐ธ๐ธ๐ธ ๐ธ
For any question feel free to contact me in private and I am very happy to see that you take the process seriously and ask questions that help you grow and maintain new habits โค๏ธ
May we all have a calm weekend full of Sustainability & Leadership ๐๐ป
''',
"20:00:08":
'''
*Good afternoon and welcome to the 5th day of practicing Sustainability & Leadership* ๐ธ๐ธ๐ธ๐ธ ๐ธ
People are leaving and it's natural, so praise yourself for still being here ๐๐๐๐๐๐
I am personally proud of each and every one of you โค๏ธ โค๏ธ โค๏ธ โค๏ธ โค๏ธ
We continue the practice of appreciation ๐ค๐ฑ๐ธ๐ท๐น Day 5 - *Gender Equality* - Choose one of three, no matter which one; the main thing is that you feel comfortable, committed and connected to the process.
Regarding today's task - *appreciation and not discrimination*
1. *Watch the TED talk* - Why Gender Equality Is Good for Everyone โ Men Included | Michael Kimmel | TED Talks | & reflect on what you saw.
https://youtu.be/7n9IOH0NvyY
2. *Observation* - Think of a case where you yourself felt gendered, or a case of gender discrimination that you encountered in your life. What did you do then and what would you do differently today? Feel free to share with the group what you wrote.
3. *A Thank-You Letter* - Write a letter of thanks, a letter of appreciation, to a person of the opposite sex close to your heart who you think has hurt you at some point in your life. โณ
Before writing a thank you letter, clear all the negative feelings about that person. If you still feel resentment, you can also write down all the negative feelings on a piece of paper and then burn or tear it.
The important thing is that when you write a letter of thanks / appreciation, you no longer feel resentment against that person.
Stop thinking who this person is. More than one person is conceivable, but we should only do the work with one.
The letter must be handwritten.
Good luck ๐
'''
,
"20:00:10":
'''
*Welcome to the fifth day of the challenge - GENDER EQUALITY*
For many years there was a clear division between the roles of the man and the roles of the woman. The man was the provider, and the woman was in charge of the household. Each gender had fairly clear dress and behavior rules. The male was considered more than the female. In recent decades, the situation has been balancing. The law safeguards the rights of women and prohibits discrimination on the grounds of gender, and many women reach senior positions in the economy. Yet the situation is far from optimal: women still earn less, are judged more on the basis of their appearance and suffer from harassment.
Gender equality is a basic human right that must not be violated, and the empowerment of women and girls is essential for strengthening economic growth, promoting social development and empowering the state as a whole.
*What do you do to fight gender inequality?*
Today we will focus on opportunities instead of obstacles. Today, we will focus on generosity instead of indifference, light instead of darkness, love instead of hatred, sustainability instead of chaos, abundance instead of lack, today we will focus on our choice to respect ourselves and others.
The main thought for today: *"We do not discriminate between people. We do not ask what a person's religion is, and what race he belongs to. He must be a human and for that we say enough"*. - Benjamin Zeev Herzl {From the book "Altneuland"}
'''
,
"20:00:13": "image/C18/5.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 6
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 6 '''
6:{
"20:00:00": "image/C18/6.png"
,
"20:00:05":
'''
*Welcome to the 6th day of the challenge - CLEAN WATER*
Attached is the exercise for day 6 - *clean water* - choose at least one of three tasks; the main thing is that you feel comfortable, committed and connected to the process.
I wish all of you to open your eyes to a new day full of abundance like a flowing stream โโโโโ
1. *Watch the TED talk* - A young scientist's quest for clean water | Deepika Kurup | & reflect on what you saw, then try to find water technologies developed in your country and share in the sharing group. ๐
https://youtu.be/AkUcaludrcI
2. *Write down your 5 flaws* - the ones you believe stop or slow down your life flow, restrict or block you, and do not allow you to get what you really want, whether it is material or not. ๐๐
Disadvantages you will want to improve / fix - and in their absence - everything will flow better. Try to concentrate and go as deep as possible. Look for the features and patterns that hold back the realization of your authentic and original potential. ๐๐๐๐๐๐๐๐๐๐๐๐๐
3. Practice the *"Law of Giving and the Flow of Life"* by giving a small gift to everyone you meet, it should not be an expensive or monetary gift, the gift can be a smile, a flower, a compliment from the heart, a helping hand or a blessing in the heart and .. Stay open to accept any gift offered to you. ๐๐๐
A good night full of Sustainability & Leadership ๐๐๐
Sharon Gal-Or
Ting Global
''',
"20:00:08":
'''
*Welcome to the 6th day of the challenge - CLEAN WATER*
The water problem is a growing global problem. It is intertwined with global warming, desertification processes, population growth, and its result is a series of wars around water, mass migration, and political change.
Access to water and sanitation are basic human rights. Global Goal #6 speaks to the fact that by year 2030, everyone will have equal global access to safe and affordable drinking water.
The easiest way to get what you want is to *create cycles in the flow of abundance* and help others get what they want and need.
The more you give, the more you will receive and grow.
The thought for today: *"Today and every day - I give what I want to attract into my life."*
'''
,
"20:00:13": "image/C18/6.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 7
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 7 '''
7:{
"20:00:00": "image/C18/7.png"
,
"20:00:05":
'''
This task has arrived today for us to enjoy the weekend so you have time and peace of mind to concentrate properly .... May we all have a calm weekend full of Sustainability & Leadership ๐๐ป
I already feel that this group is special ๐
A magical weekend to all and please continue to celebrate *Sustainability & Leadership*
For any question feel free to contact me in private and I am very happy to see that you take the process seriously and ask questions that help you grow and maintain new habits โค๏ธ
''',
"20:00:08":
'''
A stunning evening for everyone โค๏ธ
Choose at least one of three - no matter which one; the main thing is that you feel comfortable, committed and connected to the process.
First of all, I am proud of you for persevering and reaching the 7th day ... Be proud of yourself, wrap yourself in an abundance of love for perseverance and the desire for a life of Sustainability & Leadership
*Energy without giving and receiving, does not flow.*
๐๐๐๐๐๐๐๐
1. *Watch the TED talk* - Why renewables canโt save the planet | Michael Shellenberger | TEDxDanubia | & reflect on what you saw. โค๏ธ
https://youtu.be/N-yALPEpV4w
2. *Save energy* - Write in a notebook ways you can save energy in your life, and commit to them until the end of the challenge, e.g. not getting angry until the end โค๏ธ โค๏ธ
3. *Get rid of the barriers* - From the five inhibiting properties that you wrote down in the previous task, choose two that you feel draw the most energy from you, and that you would like to work on. Define your plan of action, by writing down at least two actions (daily concrete actions, or wide-ranging actions) that will help you get rid of the barriers. Implement them! โค๏ธ โค๏ธ โค๏ธ ๏ธ
'''
,
"20:00:10":
'''
*Welcome to the seventh day of the challenge - CLEAN ENERGY*
Energy is life. In recent years there has been a huge increase in our energy consumption. Almost all of the energy we use comes from the earth - coal, gas and oil.
Today we have new and clean technologies at our disposal and they can divert development towards a green and sustainable energy production path. In general, implementing new energy solutions, as quickly as possible, is essential to reducing our excessive footprint and tackling climate change, one of the biggest threats to our survival.
*And now for energy in the spiritual dimension* - E=MC2, and E equals Love, because energy in the highest dimension is love, and sustainability means filling your life with love - not at the expense of future generations.
*Remember*: Before each choice you make, ask yourself, "How will this choice make me and those affected by my choices feel?"
Listen to your heart, hear the answer and trust yourself to make the right choice - because energy in its highest dimension is pure love.
The main thought for today: *"Today I make great choices because they are made with pure love"*
'''
,
"20:00:13": "image/C18/7.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 8
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 8 '''
8:{
"20:00:00": "image/C18/8.png"
,
"20:00:05":
'''
*ECONOMIC GROWTH - Consciousness Abundance & Future Oriented Thinking*
Good evening to all โค๏ธ The 8th day is a foundational and essential day when it comes to economic abundance ๐๐ธ๐ธ๐ธ
Take advantage of the challenge to create in your life everything that is accurate and true in the field of abundance that you seek to magnetize to you โค๏ธ
Choose at least one of the three tasks, the main thing is that you feel comfortable, committed and connected to the process. Note that Task 1 is from today until the end of the challenge.
1. *Watch the TED talk* - How the future of work is not โJobsโ | Rudy Karsan | TEDxCalgary | & reflect on what you saw.
https://youtu.be/sQNtu4kpd64
2. *Ting Ikigai* - Want to find your Ikigai? Connect different components of the three parts of the table to create a new value activity that you can work on. Challenge yourself to ask 3 more friends the same question.
3. *Role play*: "Choose a character that connects to the kind of abundance you desire." From there, as dedicated actors, you must begin to step into the shoes of the character and conduct your life as if you were that character. How would you respond to conversations that come your way? What level of energy would you invest? What choices would you make?
Notice how we perceive ourselves as we conduct ourselves from this new perception, and how those around us perceive us. How does it feel? Do things look different? The goal of the game is to teach us how much our perception of reality shapes the reality we experience.
From today until the end of the challenge you must practice the same character. *At the end of the practice, mark the day number and ๐ธ 8 each day from now until the end of the challenge.*
''',
"20:00:08":
'''
*Welcome to the 8th day of 18 days of Sustainability & Leadership challenge*
*Goal 8: Decent Work & Economic Growth*
About half the world's population still lives on the equivalent of $2 a day. And in too many places, the fact that a person has a job does not guarantee their ability to escape poverty. And in your country? With the outbreak of the corona crisis, the unemployment rate quickly climbed dramatically - most of them workers who were pushed out of their daily jobs. What solutions are offered by government ministries? What are the changes in work patterns that we are going to see?
On such days it is obligatory upon each and every one of us to show creativity, unity and leadership, and to set a personal example and be a role model for our children and friends, to rebuild trust and to create a sense of security and abundance.
And now for *future-oriented thinking* - one of the most striking differences between successful people and those who are less successful, is not the amount of money they have, but future-oriented thinking, and their ability to act on time. When we, as one, imagine abundance and economic security - we can create anything we want.
*What if the solution to the economic crisis is - Universal Basic Income?*
*Imagine for a second* that the country provides you with Universal Basic Income (UBI), that will allow you economic security. An income that will alleviate the fear of existential worries and enable entrepreneurship and growth. Sounds imaginary? Once civil rights, the right to vote for women, the abolition of slavery and the establishment of the Jewish state - sounded utopian. Just like them, the idea of basic income sounds utopian on first reading and realistic and fair on second reading. Every citizen will receive a Universal Basic Income that allows for a dignified life, without having to worry about a shortage of food or shelter.
Sounds like Mission Impossible? Hard times require decisive action. We must join forces and find solutions.
And finally, an important note, especially in a time like this: Give what you want to receive. You must transmit to the conscious mind that you already have abundance in order to receive abundance. That is why donating and giving is extremely important. It's fascinating and it works miraculously. Maybe because in practice we are one.
The main thought for today: *"If you will it, it is no dream; and if you don't, a dream it is and a dream it will stay."* - Theodor Herzl. In other words, โwhen there is a will - there's a wayโ.
'''
,
"20:00:13": "image/C18/8.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 9
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 9 '''
9:{
"20:00:00": "image/C18/9.png"
,
"20:00:05":
'''
Good week to all and welcome to Day 9 ๐๐๐ Industry, Innovation and Infrastructure - Choose at least one of the three tasks; the main thing is that you feel comfortable with it and feel committed and connected to the process.
1. *Watch the video* - Modern times and write in a notebook what is your production line?
https://www.youtube.com/watch?v=6n9ESFJTnHs
''',
"20:00:08":
'''
In addition, watch this *TED talk* - The next manufacturing revolution is here | Olivier Scalabre | & reflect on what you saw.
https://youtu.be/AyWtIwwEgS0
2. *Aliens visiting Earth* - Imagine that aliens from a planet named Ting ๐ก๐ก๐ก have landed at your house and they offer you three wishes to renew things in your life and three more wishes to renew things in your country - what would you renew ?? โค๏ธโค๏ธ
3. *Digital pacifier* 1 hour mobile diet task - you must close/shut down your mobile phone for a continuous period of only 1 hour, during the day - each day - from today until the end of the challenge. If you selected this task, mark ๐ฑ9 in the group daily.
4. *Section 4* in a challenge of 3 daily tasks - this is an innovation in itself! From today until the end of the challenge, write down 3 new ideas in your notebook every day, mark ๐ก9
A magical evening full of Sustainability, Leadership & Innovation ๐
'''
,
"20:00:10":
'''
*Welcome to the ninth day of the challenge - Goal 9: Industry, Innovation and Infrastructure*
Every industry must innovate, and properly functioning immune infrastructures are the building blocks for any thriving community.
*Innovation and infrastructure are also on the spiritual level* - on the personal level, it is to break free from the shackles of the past, from those prejudices, customs and habits that limit your growth. Innovation is re-inventing yourself, recognizing opportunities in a time of crisis, connecting with yourself, spirit and soul.
*It's time to make life-changing decisions!* - How? Focus your intention about what you want and need.
*Remember* - what we notice and focus on, expands in our lives, and our intention will help the universe to support the desired outcome. Yes, with focus and intention, you can change your life and reflect your desires more accurately.
The main thought for today: *"Focus is a key & Imagination is your wings"*
'''
,
"20:00:13": "image/C18/9.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 10
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 10 '''
10:{
"20:00:00": "image/C18/10.png"
,
"20:00:05":
'''
*Welcome to the 10th day of the challenge - REDUCED INEQUALITIES*
*Today we will practice listening*
A stunning evening for everyone ๐๐๐ We are on the 10th day, which means that you have been in this process for 10 days and I am proud of each and every one of you.
๐๐๐๐๐๐๐๐๐๐๐
We have always been educated to think in dual format. Us versus them. Right and left, rich and poor and even worse, such as national/local and global. So, in the coming day we will practice listening ๐๐ผ๐๏ธ๐.
Ting - Ting - Ting - One of the most difficult tasks nowadays is how to navigate our way through all the voices that demand our attention - the voices of the state, security, economy, family, media (Internet, TV, radio, newspapers, advertisements, etc.) - and find what is important to us, and find ourselves among them.
To become a better person/leader, learn to listen first - "TING".
The ancient Chinese word for listening, "TING", captures the spirit of listening:
Ears - to be attentive while the other speaks and not to think about what we have to say next.
Eyes - to be able to put ourselves in the place of the other and see things through his/her point of view.
Mind - the ability to understand the words of the other.
Heart - Only when we really care are we really attentive.
Last word, if you need to remember one thing from this message, I want you to remember this: If you are experiencing difficulty, whatever it is, the solution is in dialogue, so immediately stop sending text messages, emailing and googling, and instead, internalize the principles of listening - TING่ฝ
The next few days will be special and full of gifts from the universe. Learn to listen and look around and notice them
๐พ๐ฟ๐บ๐๐๐ผ๐๏ธ๐๐ฒ๐ณ๐๐ผ
*Task for the tenth day of the challenge*
Today's lesson is to enjoy the day by listening genuinely.
Write in a notebook at least three ways you noticed things you had not noticed before (even small things).
Today's thought: *True listening is from the ears, the eyes and the heart.*
An amazing evening full of Sustainability, Leadership and true listening! ๐๐ผ๐๏ธ๐๐ฆ๐ฆ
''',
"20:00:08":
'''
*Goal 10 - REDUCED INEQUALITY*
Welcome to the tenth day of the 18 Days of Sustainability & Leadership of Ting Global.
Income and wealth inequality are severe and are becoming widespread throughout the world. The richest 1% of the world's population currently controls about 40% of total global assets, while the poorest half holds only 1%. Simply put - most of the world's wealth is held by a very small group of people. What is the real meaning of these differences?
For countries to prosper, equality and growth must be the domain of the entire population. Regardless of gender, race, religion, belief or economic status. When we reach a reality in which each person is self-dependent and independent, then the whole world will be able to thrive - period.
In order to fight inequality between and within countries, we must promote inclusive business models that empower groups, which are on the margins of our communities.
Ask yourself "How can I help? How can I serve?"
To serve others with your 'gifts' is the highest expression of your destiny. When your creative expression matches the needs of those around you, abundance flows easily into your life, in exemplary divine order.
You can practice today, just constantly recognize your unique talents. Listen to your inner voice and ask yourself how you can use these 'gifts' to serve the world in the best way.
The main thought for today: *"I listen to my inner voice. The answers are inside me"* ๐๐ผ๐๏ธ๐
'''
,
"20:00:13": "image/C18/10.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 11
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 11 '''
11:{
"20:00:00": "image/C18/11.png"
,
"20:00:05":
'''
*Goal 11 - SUSTAINABLE CITIES AND COMMUNITIES*
Choose at least one of three tasks, no matter which one, the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the TED talk* - 7 principles for building better cities | Peter Calthorpe| & reflect on what you saw. https://youtu.be/IFjD3NMv6Kw
2. *Invent new environmental symbols* - Can you design future environmental symbols/logos? And what about a personal symbol that is all you?
Show the symbol you created to friends and try to see if they can understand what you meant. Were they able to understand? Wonderful. Did not succeed? Modify the icons so that they are more understandable, and show them again.
3. *Letter to the mayor* - Write a letter to your mayor, stating everything you want to change in the city. You are of course welcome to share with the group what you have written and even send it to your city mayor.
May you all have a day full of Sustainability & Leadership
โค๐๐๐๐๐๐๐โค
''',
"20:00:08":
'''
*Welcome to the eleventh day of the challenge - Cities and Sustainable Communities*
According to SDG 11, by 2050, 70% of the world's population will live in cities, thus making cities vital in achieving a sustainable future for the world.
I have a dream to live in a sustainable city that allows for a life of happiness and living with dignity for all, inside and outside it.
I have a dream to live in a city that wisely and fairly uses the infrastructure, material, natural, human and social resources at its disposal.
I have a dream to live in a city that takes responsibility for its role in managing global ecosystems, and responsibility for its physical and cultural heritage and future generations.
And you - in what city do you dream of living?
The main thought for today: *"I have a dream as I care about my city, my country and the whole world."*
'''
,
"20:00:13": "image/C18/11.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 12
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 12 '''
12:{
"20:00:00": "image/C18/12.png"
,
"20:00:05":
'''
This task has arrived today for us to enjoy the weekend so you have time and peace of mind to concentrate properly .... May we all have a calm weekend full of Sustainability & Leadership ๐๐ป
*Task for the twelfth day ๐๐- RESPONSIBLE CONSUMPTION & PRODUCTION*
Challenge yourself to new habits - without single-use disposables and without plastic bags.
Choose at least one of the three tasks, the main thing is that you feel comfortable and committed and connected to the process.
1. *Watch the TED talk* - a video about how the garbage of Lauren, a young New York woman and graduate of environmental studies, amounts to only one jar | & reflect on what you saw. https://www.youtube.com/watch?v=pF72px2R3Hg
2. *Environmental cleanliness* - cleaning up waste in the environment near me. Just take a bag and clean; do not ask anyone else to clean - the others will see, and you will see how they imitate you. Taking a "before and after" photo will be great.
3. *I say โnoโ to disposable* - from today until the end of the challenge, do not use disposable - no drinking bottles, no disposable cups etc. *At the end of the practice, mark the day number and ๐ฅฅ 12 each day from now until the end of the challenge.*
''',
"20:00:08":
'''
*Welcome to the Twelfth Day of the Challenge - RESPONSIBLE CONSUMPTION & PRODUCTION*
Mother Earth has provided us with an abundance of natural resources. Today we consume resources at a pace far beyond what the earth is capable of providing us. We must learn how to use and produce sustainable alternatives and repair the damage we have done to the environment - and if not, THIS IS THE END.
*Tell me please?* Isn't it disgusting for you to go to the beach and sit on a mixture of sand and plastic garbage?
It's time to really change our habits and demand that the manufacturing companies take responsibility; it's time to march in the streets, raise a "shout" and sign petitions, because we deserve more.
Our world is being abused. We must start recycling, reducing, and producing as little garbage as possible. Because the garbage does not just disappear. It is just swept under the world's carpet or spills into the sea.
It is obligatory upon each of us to take a decision and begin to reduce *our personal ecological footprint* in order to enable the renewal of natural resources on which our lives, the future of our children and biodiversity depend.
The main thought for today: *I stop to think (3) three times before I consume*
'''
,
"20:00:13": "image/C18/12.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 13
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 13 '''
13:{
"20:00:00": "image/C18/13.png"
,
"20:00:05":
'''
*The task for the thirteenth day ๐๐ Climate Action*
Choose at least one of the three tasks - no matter which, the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the TED talk* - School strike for climate - save the world by changing the rules | Greta Thunberg | TEDxStockholm | & reflect on what you saw.
https://youtu.be/EAmmUIEsN9A
A magical evening full of sustainability, leadership and passing on the good ๐
''',
"20:00:08":
'''
2. *So what's your ecological footprint?* - Feel free to play and test yourself. https://www.footprintcalculator.org/
3. *Sharing is caring* - Today's task and until the end of the challenge is to share the challenge with friends, even 3 people each day are enough, although the more you share with more people the more powerful the energy in my eyes. Remember to share, experiences, impressions, what you learned new and so on. The mark for sharing is emoji ๐ ๐ ๐ ๐ ๐ ๐
'''
,
"20:00:10":
'''
*Welcome to the thirteenth day of the challenge - Climate Action*
When I was a young student they talked about global warming. I remember coming out of the class scared and terrified that the end of the world was near. Yes, for over 30 years the scientific community has been warning of an increase in carbon dioxide levels in the atmosphere, while the global world population has nearly doubled. The growth rate of our emissions is rising fast - even faster than what we thought, just a few years ago, was the worst-case scenario - so yes, it is right for me to be scared and terrified, and to do something about it.
Greta Thunberg, at the age of 15, recognized the gap between what climate experts were saying and the actions that were being taken in society. The difference was so drastic in her opinion that she decided to take matters into her own hands.
*My goal and the goal of us all must be:*
1. Stimulate a dialogue on progress & technology - good? Bad? How can we tell?
2. Communicate the climate crisis to the younger generation
3. Involve today's generation in tomorrow's decision-making process
*Danger!* - Climate change is a real and inevitable danger to all of humanity. The effects of climate change are already evident and could be tragic, if we do not act now!
Through education, innovation and adherence to our responsibilities as citizens, both adults and young people, we may be able to make the necessary changes to protect the continued existence of the human race.
The main thought for today: *"The climate crisis is also my challenge"*
'''
,
"20:00:13": "image/C18/13.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 14
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 14 '''
14:{
"20:00:00": "image/C18/14.png"
,
"20:00:05":
'''
This task has arrived today for us to enjoy the weekend so you have time and peace of mind to concentrate properly .... I already feel that this group is special ๐
A magical weekend to all and please continue to celebrate *Sustainability & Leadership*
''',
"20:00:08":
'''
*Task for the Fourteenth Day* ๐14 - *LIFE UNDERWATER*
Choose at least one of the three tasks - the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the TED talk* - Paul Snelgrove: A census of the ocean | & reflect on what you saw. https://youtu.be/PcDftBVDSlc
2. *The sea within us* - Find at home some item related to the sea (oyster you collected, toy or game with pictures of fish, doll, etc.), take a picture with it and share with 5 friends on WhatsApp or social media post and explain why you think it is beautiful and important.
ORโฆ
*The Other Whale* - Find a picture of whales you particularly liked on the Internet. Share with 5 friends on WhatsApp or a post on social media and explain why you think it's beautiful and important. By the way, did you find the other whale? ๐14
3. *Change Angels* - Today's mission, to be completed before the end of this challenge, is to open your own *Challenge 18 Sustainability & Leadership group*. ๐ This group does not require many members, even 3 people are enough, although the more people there are the more powerful the energy in my eyes.
Every day, you will have the task of passing the instructions from the day to the end of the 18 days, just as you received them and will continue to receive them from me.
You can use the previous messages I sent. Mark it with ๐คน๐ปโโโ๐ฅโ
Remember that you will need to send the daily message and notebook exercises every day, and follow the people in your group who did the tasks (or remove them from the group) - practice giving.
'''
,
"20:00:10":
'''
*Welcome to the fourteenth day of the challenge - LIFE UNDERWATER*
Goal number 14 deals with the conservation of the oceans, seas and underwater life.
Today, more than half of the world's coral reefs are in danger of extinction. Oceans face the threat of marine and nutrient pollution, resource depletion and climate change, all of which are primarily a result of human actions - and the consequences come back to us as well. These create global socio-economic problems, including health, safety and economic risks.
Is it our responsibility to make sure that world leaders do their job faithfully to protect marine animals?
Is it our responsibility to support people who depend on the oceans, whether the dependence is on employment, resources or pleasure?
The main thought for today is, *"Life is a beautiful, magnificent thing, even to a jellyfish."*
'''
,
"20:00:13": "image/C18/14.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 15
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 15 '''
15:{
"20:00:00": "image/C18/15.png"
,
"20:00:05":
'''
*Task for the fifteenth day ๐๐ Life on land*
Allow yourself a quiet and calm time, take a deep breath ... if possible do the task in nature, it will be great ๐ธ๐๐ฆ๐ผ๐ป๐น๐ท๐๐บ๐โ๐๐ฑ๐ด๐ฒ
Choose at least one of the three tasks - no matter which, the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the TED talk* - How trees talk to each other | Suzanne Simard | & reflect on what you saw.
https://youtu.be/Un2yBgIAxYs
2. *Plant a tree* or donate to a body that promotes tree planting in your area. Take a picture or film yourself and capture the magic.
3. *TSL Capsule Time-Sustainability-Leadership*: Write a letter of request to the universe, decorate it, write your name and the date, and bury it in the ground in a capsule (a small box, a glass bottle or even a flower pot). Remember: you plant hopes, expectations and dreams, and the universe will grow them; define exactly what you want and when.
Note: It is also possible to forward the letter to a close person, such as a teacher or friend, who will keep a letter and return it to you a year later.
A magical evening full of *Sustainability & Leadership* ๐โค๏ธ๐
''',
"20:00:08":
'''
*Welcome to the fifteenth day of the challenge - life on land*
*Goal 15 - Protecting land ecosystems, restoring and promoting their sustainable use, sustainable forest management, fight against desertification, stopping and reversing land degradation, stopping the loss of biodiversity.*
We live in the midst of an age of species extinction, mostly man-made. If the current trends continue, about half of the species living today are expected to become extinct by the end of the century. Most extinct species are not known and therefore we do not know how their extinction will affect us. Renowned ecologist Paul Ehrlich likens this situation to passengers on a plane who remove screws from different parts of the plane while flying. The screw may attach the backrest to the seat, but it is also possible that removing the screw will impair engine operation. Just as there are many screws in the plane, so there are many species of living things on Earth and we have no way of predicting what the consequences of their extinction will be. What we do know is that we are destroying parts of the life-supporting systems on Earth, since life itself is what creates the life-supporting conditions of our planet.
Should we preserve a rainforest just because of the economic value of the drugs, and the potential food species hiding among its branches? Should we preserve the forest because it is part of the Earth's life support systems? And maybe the answer lies in our cultural values? How easily do we allow ourselves to ignore all the beauty and grandeur of nature? Do we really want to live in a world that is all artificial? What are the implications of such a situation for the mental well-being of human beings?
The main thought for today: *"Man is as the tree in the field - like the tree, he strives upward"*
'''
,
"20:00:13": "image/C18/15.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 16
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 16 '''
16:{
"20:00:00": "image/C18/16.png"
,
"20:00:05":
'''
*Welcome to the sixteenth day of the challenge ๐๐ - Peace, Justice and Strong Institutions*
Goal 16 reminds us that compassion and a strong moral compass are essential to any human democratic society. Persecution of the weak, corruption, injustice and abuse are still rampant and tearing apart the fabric of civilization. As hard as it is to admit, no country on earth exists in a true democracy. People have the illusion of choice and are actually managed and unrepresented. As long as it is a system run by humans there will always be corruption and injustice, it is time to advance legislation to integrate advanced technologies as part of the system of government. We must ensure that we have strong institutions, global standards of justice and a commitment to peace everywhere.
Choose one of the three tasks - no matter which, the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the TED talk* - Jody Williams: A realistic vision for world peace | & reflect on what you saw. I invite you to contact members of the challenge group whose phone number ends with the same number as yours and discuss with them the issue of social justice - cool?
https://youtu.be/FD6CqD1kV8s
2. *Looking Back* - Look back at your task notebook and read all the tasks you have performed. Check to see if you have any gaps in the challenges or anything to add.
''',
"20:00:08":
'''
3. *Sophie's song* - see the following text message
Read the story you will find below at least two or three times and then write down your personal reflections and thoughts in a notebook.
And even better - share with the group, if you want ๐น๐น๐น, everything that comes to your mind that fits the spirit of things.
*Sophie's Song*
Sophie was ๐จ. She woke up ๐ญ from her dream.
So she packed her ๐ and set out on a journey to see the ๐,
She first met a puppy ๐; He asked for ๐ง so she shared her ๐ง,
She met a boy ๐ฆ who was hungry, so she shared her ๐,
She met a ๐ค that had fallen from the ๐ณ. So she took off her ๐ and made a ๐ for the ๐ค,
She met an old man who was cold so she gave him her ๐งฅ,
She met a lost ๐ who was ๐ข so she sang her a happy ๐๐ถ๐ต๐ถ song,
When it started to ๐ง, she was sitting under a ๐ณ, she was thirsty, hungry, she was cold and she only had one ๐, but she was no longer ๐จ. Sophie fell asleep ๐ด.
She woke up with a ๐. Sophie was ๐.
'''
,
"20:00:10":
'''
Sophie's emotional journey consists of three stages: Fear -> Empathy -> Happiness. Symbolic release of the human spirit from darkness to light.
Task: Choose a person you think is unhappy / unwell, who has been complaining about life for some time and share Sophie's story with that person.
Then, write in a notebook: How was the sharing experience and what did it evoke in you? Did he / she answer you? Did you talk and how? Did the person thank you? How did sharing make you feel?
The main thought for today: *"Justice, justice shall you pursue"*
A magical evening full of sustainability, leadership, peace, justice, strong institutions and passing on the good ๐
*Ludoland never dies - it is reborn*
Sharon Gal-Or
Global Cultural Ambassador
Ting Global
'''
,
"20:00:13": "image/C18/16.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 17
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 17 '''
17:{
"20:00:00": "image/C18/17.png"
,
"20:00:05":
'''
I am proud of you for persevering and reaching the 17th day of the challenge, another day ... Be proud of yourself, wrap yourself in an abundance of love for perseverance and the desire for a life of sustainability and leadership.
By the way, this is your chance to complete the missions you did not complete,
Have a lovely day,
Sharon
*Task for the Seventeenth Day ๐๐ - Partnerships for Achieving the Goals*
Choose at least one of the three tasks - no matter which, the main thing is that you feel comfortable, committed and connected to the process.
1. *Watch the video* - 3 steps to achieving the SDGs | Linda Midgley | TEDxAlkmaar | & reflect on what you saw.
https://youtu.be/-pryCzwDbXY
''',
"20:00:08":
'''
AND this video: Fly on life, then write your thoughts and insights in a notebook.
https://youtu.be/rlwfd1ZaDJ4
'''
,
"20:00:10":
'''
2. Read the story *"Noah and the Flood"* - about how, while they were in the ark and it was time to rest, Noah's family and the animals had time to chat about what the new world would look like when they descended from the ark to land. Try to answer the questions at the end of the article.
https://blogs.timesofisrael.com/noahs-ark-the-global-goals/
'''
,
"20:00:13":
'''
Today's task: *Ting method for interdisciplinary thinking combined with coding symbols*
Contact a group member or members - yes, do not be ashamed, even ones you do not know, because we are climbing the same mountain, right here and right now, each on our personal journey - and do the exercise together.
Feel free to share with us here ideas created during the process. ๐๐ธ๐
3. *Ting - Creative thinking of ideas for new ventures*
1. Choose three areas of interest / occupation
2. Break down each area into 3 associations that come to mind
3. Connect the areas of activity to inspire you to create an idea for a new venture.
ORโฆ
*Ting - Creative Thinking Finding Solutions to Problems*
Steps in creating the idea:
1. Define your personal problem or any general problem/challenge.
2. To which of the goals in the table of the goals of the existence of the universe, is the problem related?
3. Find three areas of activity related to the problem you have defined.
4. Break down each area into 3 associations that come to mind
5. Connect the areas of activity to find a solution to the problem you have defined.
6. Draw a symbolic representation of the solution.
You are of course also welcome to share with the group what you have written.
A magical evening full of *sustainability, leadership* and passing on the good ๐
Sharon Gal-Or
Global Cultural Ambassador
Ting Global
'''
,
"20:00:15":
'''
*Welcome to the seventeenth day of the challenge โ Partnership for the Goals*
*So what does collaboration mean?* The true meaning of collaboration is the joint performance of actions that connect us to a new significant abundance in places where it was not previously visible and could not be revealed when each party acted alone.
*This is a real crisis* and it should be addressed accordingly: we must work to reduce the use of exhaustible energy sources and join hands in resolving the climate crisis. If we want to survive on Earth we have about a decade, until 2030, to completely change what and how we teach in schools, consume energy, sustain agriculture, manage water, talk to our neighbors and more. This is not a passing trend, but a global change in the way we teach, in the way we do business, in social and corporate responsibility, and in long-term thinking.
*"We have ten years to repair the damage we have caused and prevent a climate crisis"*, the UN warns.
The main thought for today: *"If you want to go fast - go alone*
*If you want to get far - go together"*
'''
,
"20:00:17": "image/C18/17.opus"
,
"18:00:00":
'''
*Two hours left to complete today's tasks*
_Raising humanity on a new path - it all starts with you_ ๐๐ธ๐
'''
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 18
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 18 '''
18:{
"20:00:00": "image/C18/18.png"
,
"20:00:05":
'''
*Welcome to the 18th Day of the Challenge โ For the FUTURE OF HUMANITY*
A lovely week to you all, and thank you for persevering and succeeding on this amazing journey. I sincerely hope that you will continue to challenge yourself and that you will also lead many others to sustainability and leadership.
True, as you understand, it was just a quick and sweet tasting, a trailer for a movie called Life, in which we are not only the viewers but also the actors, who are looking for meaning, growth, achievements and a better world for all of us.
This is your year. Ask, declare, listen, smile more, start living, and see how you manage to grow each day with the choices and decisions you make, the way you spend your time, the people you hang out with and how you feel at any given moment.
For me, sustainability is first and foremost listening to the inner self, heart and mind, for me sustainability is living in the beings I have chosen for myself, joy, giving and listening, it all comes down to that.
I want to thank you again for participating in the challenge,
We are almost done ... another step and we reached the depths of this small and big journey.
In fact, let's walk 18 festive steps, a reminder of our joint journey together. Imagine: what kind of world would you like to live in? I would love to hear about it in the group or in private,
You may say I'm a dreamer
But I'm not the only one
I hope one day you will join me
And the world will be as one
''',
"20:00:08":
'''
How fun it is to be here. You are amazing! I'm happy and I congratulate you.
*The task for today* - Send this group a video or written message, preferably a video.
Introduce yourself, most of you do not know each other and it will be exciting to know who was part of the energy that supported us all.
Describe your experience from these 18 days of sustainability and leadership: emotions, observations, signs, surprises, what has changed inside and outside of you and so on.
If you want to resonate something in the universe this is the time!
If you have interesting ideas and projects share them here, this is important because this group may have people who are interested in what you do, the services you offer or your ideas.
Be creative and real when you describe yourself, do not be ashamed, the stage is all yours.
So I hope you enjoyed, I definitely feel privileged to go this way with you and I really enjoyed and grew, and I would love to see as many videos of you as possible or hear from you in writing about your experience, and even more, to hear that you have opened your own Challenge 18 group.
Continue a magical week full of sustainability, leadership and passing on the good ๐๐ธ๐๐๐๐ป
Sharon Gal-Or
Global Cultural Ambassador
Ting Global
For more information contact me: +972 55-9721123 | [email protected]
'''
,
"20:00:13": "image/C18/18.opus"
,
},
#################################################################
#################################################################
#################################################################
#
# ENTER CORRECT DAY MESSAGES FOR DAY 19
#
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# ''' DAY 19 '''
19:{
"10:00:00": "image/C18/19.png"
,
"10:00:05":
'''
THANK YOU FOR SUCCESSFULLY FINISHING THE CHALLENGE
๐ ๐ ๐
'''
},
}
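# Structure note: each day number in the schedule above maps time-of-day keys ("HH:MM:SS")
# to either a media path (e.g. "image/C18/7.png" or "image/C18/7.opus") or the text of the
# message scheduled for that time.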
#https://docs.google.com/document/d/1DH2D7TbobiQDnjnoJM_TQ7tFBn4GIiftSz1Awz_UHVg/edit
#XXX
|
the-stack_106_21330
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : System internal tools
Case Name   : Use the gs_probackup add-instance command with the --remote-proto=none parameter
Description :
    1. Create the backup directory
    2. Run initialization
    3. Initialize a new backup instance inside the backup path
    4. Check whether the pg_probackup.conf configuration file is generated
    5. Clean up the environment
Expect      :
    1. Directory created successfully
    2. Initialization succeeds
    3. The new backup instance is initialized successfully
    4. pg_probackup.conf is generated; it stores the gs_probackup settings for the
       specified data directory (pgdata-path)
    5. Environment cleanup completed
History     :
"""
import os
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
class SystemInternalTools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info('--Opengauss_Function_Tools_Gs_Probackup_Case0142start-')
self.constant = Constant()
self.Primary_Node = Node('PrimaryDbUser')
self.gs_probackup_path = os.path.join(macro.DB_INSTANCE_PATH,
'gs_probackup_testdir0142')
def test_system_internal_tools(self):
        text = '--step1: create the backup directory; expect: creation succeeds----'
self.log.info(text)
mkdir_cmd = f'''if [ ! -d "{self.gs_probackup_path}" ]
then
mkdir -p {self.gs_probackup_path}
fi'''
primary_result = self.Primary_Node.sh(mkdir_cmd).result()
self.log.info(primary_result)
        self.assertEqual(primary_result, '', 'execution failed: ' + text)
        text = '--step2: run initialization; expect: initialization succeeds---'
self.log.info(text)
init_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_probackup init -B {self.gs_probackup_path};"
self.log.info(init_cmd)
init_msg = self.Primary_Node.sh(init_cmd).result()
self.log.info(init_msg)
self.assertIn(self.constant.init_success, init_msg,
                      'execution failed: ' + text)
        text = '-step3: initialize a new backup instance inside the backup path;' \
               'expect: the new backup instance is initialized successfully-'
self.log.info(text)
init_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_probackup add-instance " \
f"-B {self.gs_probackup_path} " \
f"-D {macro.DB_INSTANCE_PATH} " \
f"--instance=test_0142 " \
f"--remote-proto=none;"
self.log.info(init_cmd)
init_msg = self.Primary_Node.sh(init_cmd).result()
self.log.info(init_msg)
self.assertIn("'test_0142' " + self.constant.init_success, init_msg,
                      'execution failed: ' + text)
        text = '---step4: check the pg_probackup.conf configuration file;' \
               'expect: the data directory is added to the configuration file---'
self.log.info(text)
cat_cmd = f"cat {self.gs_probackup_path}/backups/" \
f"test_0142/pg_probackup.conf"
self.log.info(cat_cmd)
cat_msg = self.Primary_Node.sh(cat_cmd).result()
self.log.info(cat_msg)
self.assertIn(f'pgdata = {macro.DB_INSTANCE_PATH}', cat_msg,
                      'execution failed: ' + text)
def tearDown(self):
        text = '---step5: clean up the environment; expect: environment cleanup completed---'
self.log.info(text)
clear_cmd = f'rm -rf {self.gs_probackup_path}'
self.log.info(clear_cmd)
clear_msg = self.Primary_Node.sh(clear_cmd).result()
self.log.info(clear_msg)
self.log.info('-Opengauss_Function_Tools_Gs_Probackup_Case0142finish-')
|
the-stack_106_21331
|
'''As an open source project, we collect usage statistics to inform development priorities.
For more information, check out the docs at https://docs.dagster.io/install/telemetry/
To see the logs we send, inspect $DAGSTER_HOME/logs/ if $DAGSTER_HOME is set or ~/.dagster/logs/
See class TelemetryEntry for logged fields.
For local development:
Spin up local telemetry server and set DAGSTER_TELEMETRY_URL = 'http://localhost:3000/actions'
To test RotatingFileHandler, can set MAX_BYTES = 500
'''
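# A minimal sketch of the opt-out described above (it mirrors TELEMETRY_TEXT further down):
# add the following to $DAGSTER_HOME/dagster.yaml, creating that file if necessary.
#
#     telemetry:
#       enabled: false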
import datetime
import hashlib
import json
import logging
import os
import sys
import uuid
import zlib
from collections import namedtuple
from functools import wraps
from logging.handlers import RotatingFileHandler
import click
import requests
import six
import yaml
from dagster import check
from dagster.core.definitions.executable import ExecutablePipeline
from dagster.core.definitions.reconstructable import (
EPHEMERAL_NAME,
ReconstructablePipeline,
ReconstructableRepository,
)
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.instance import DagsterInstance
TELEMETRY_STR = '.telemetry'
INSTANCE_ID_STR = 'instance_id'
ENABLED_STR = 'enabled'
DAGSTER_HOME_FALLBACK = '~/.dagster'
DAGSTER_TELEMETRY_URL = 'http://telemetry.dagster.io/actions'
MAX_BYTES = 10485760 # 10 MB = 10 * 1024 * 1024 bytes
UPDATE_REPO_STATS = 'update_repo_stats'
START_DAGIT_WEBSERVER = 'start_dagit_webserver'
TELEMETRY_VERSION = '0.2'
# When adding to TELEMETRY_WHITELISTED_FUNCTIONS, please also update the literalinclude in
# docs/next/src/pages/install/telemetry.mdx
TELEMETRY_WHITELISTED_FUNCTIONS = {
'pipeline_execute_command',
'pipeline_launch_command',
'execute_pipeline',
}
def telemetry_wrapper(f):
'''
Wrapper around functions that are logged. Will log the function_name, client_time, and
elapsed_time, and success.
'''
if f.__name__ not in TELEMETRY_WHITELISTED_FUNCTIONS:
raise DagsterInvariantViolationError(
'Attempted to log telemetry for function {name} that is not in telemetry whitelisted '
'functions list: {whitelist}.'.format(
name=f.__name__, whitelist=TELEMETRY_WHITELISTED_FUNCTIONS
)
)
@wraps(f)
def wrap(*args, **kwargs):
start_time = datetime.datetime.now()
log_action(action=f.__name__ + '_started', client_time=start_time)
result = f(*args, **kwargs)
end_time = datetime.datetime.now()
log_action(
action=f.__name__ + '_ended',
client_time=end_time,
elapsed_time=end_time - start_time,
metadata={'success': getattr(result, 'success', None)},
)
return result
return wrap
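# Illustrative usage sketch (not part of this module): the decorator may only be applied to
# functions whose names appear in TELEMETRY_WHITELISTED_FUNCTIONS above; any other name
# raises DagsterInvariantViolationError at decoration time.
#
#     @telemetry_wrapper
#     def execute_pipeline(*args, **kwargs):  # name must be whitelisted
#         ...
#
# Each call then logs an 'execute_pipeline_started' and an 'execute_pipeline_ended' action.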
def get_python_version():
version = sys.version_info
return "{}.{}.{}".format(version.major, version.minor, version.micro)
class TelemetryEntry(
namedtuple(
'TelemetryEntry',
'action client_time elapsed_time event_id instance_id pipeline_name_hash '
'num_pipelines_in_repo repo_hash python_version metadata version',
)
):
'''
Schema for telemetry logs.
Currently, log entries are coerced to the same schema to enable storing all entries in one DB
table with unified schema.
action - Name of function called i.e. `execute_pipeline_started` (see: fn telemetry_wrapper)
client_time - Client time
elapsed_time - Time elapsed between start of function and end of function call
event_id - Unique id for the event
instance_id - Unique id for dagster instance
pipeline_name_hash - Hash of pipeline name, if any
python_version - Python version
repo_hash - Hash of repo name, if any
num_pipelines_in_repo - Number of pipelines in repo, if any
metadata - More information i.e. pipeline success (boolean)
version - Schema version
If $DAGSTER_HOME is set, then use $DAGSTER_HOME/logs/
Otherwise, use ~/.dagster/logs/
'''
def __new__(
cls,
action,
client_time,
event_id,
instance_id,
elapsed_time=None,
pipeline_name_hash=None,
num_pipelines_in_repo=None,
repo_hash=None,
metadata=None,
):
action = check.str_param(action, 'action')
        client_time = check.str_param(client_time, 'client_time')
elapsed_time = check.opt_str_param(elapsed_time, 'elapsed_time', '')
event_id = check.str_param(event_id, 'event_id')
instance_id = check.str_param(instance_id, 'instance_id')
metadata = check.opt_dict_param(metadata, 'metadata')
if action == UPDATE_REPO_STATS:
pipeline_name_hash = check.str_param(pipeline_name_hash, 'pipeline_name_hash')
num_pipelines_in_repo = check.str_param(num_pipelines_in_repo, 'num_pipelines_in_repo')
repo_hash = check.str_param(repo_hash, 'repo_hash')
else:
pipeline_name_hash = ''
num_pipelines_in_repo = ''
repo_hash = ''
return super(TelemetryEntry, cls).__new__(
cls,
action=action,
client_time=client_time,
elapsed_time=elapsed_time,
event_id=event_id,
instance_id=instance_id,
pipeline_name_hash=pipeline_name_hash,
num_pipelines_in_repo=num_pipelines_in_repo,
repo_hash=repo_hash,
python_version=get_python_version(),
metadata=metadata,
version=TELEMETRY_VERSION,
)
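# For orientation only - once passed through write_telemetry_log_line (which json.dumps the
# namedtuple's _asdict()), a logged entry looks roughly like the following; every value here
# is a made-up placeholder:
#
#     {"action": "execute_pipeline_started", "client_time": "2020-01-01 00:00:00",
#      "elapsed_time": "", "event_id": "<uuid4>", "instance_id": "<uuid4>",
#      "pipeline_name_hash": "", "num_pipelines_in_repo": "", "repo_hash": "",
#      "python_version": "3.7.4", "metadata": {}, "version": "0.2"}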
def _dagster_home_if_set():
dagster_home_path = os.getenv('DAGSTER_HOME')
if not dagster_home_path:
return None
return os.path.expanduser(dagster_home_path)
def get_dir_from_dagster_home(target_dir):
'''
If $DAGSTER_HOME is set, return $DAGSTER_HOME/<target_dir>/
Otherwise, return ~/.dagster/<target_dir>/
The 'logs' directory is used to cache logs before upload
The '.logs_queue' directory is used to temporarily store logs during upload. This is to prevent
dropping events or double-sending events that occur during the upload process.
The '.telemetry' directory is used to store the instance id.
'''
dagster_home_path = _dagster_home_if_set()
if dagster_home_path is None:
dagster_home_path = os.path.expanduser(DAGSTER_HOME_FALLBACK)
dagster_home_logs_path = os.path.join(dagster_home_path, target_dir)
if not os.path.exists(dagster_home_logs_path):
os.makedirs(dagster_home_logs_path)
return dagster_home_logs_path
def get_log_queue_dir():
'''
Get the directory where we store log queue files, creating the directory if needed.
The log queue directory is used to temporarily store logs during upload. This is to prevent
dropping events or double-sending events that occur during the upload process.
If $DAGSTER_HOME is set, return $DAGSTER_HOME/.logs_queue/
Otherwise, return ~/.dagster/.logs_queue/
'''
dagster_home_path = _dagster_home_if_set()
if dagster_home_path is None:
dagster_home_path = os.path.expanduser(DAGSTER_HOME_FALLBACK)
dagster_home_logs_queue_path = dagster_home_path + '/.logs_queue/'
if not os.path.exists(dagster_home_logs_queue_path):
os.makedirs(dagster_home_logs_queue_path)
return dagster_home_logs_queue_path
def _get_telemetry_logger():
logger = logging.getLogger('dagster_telemetry_logger')
if len(logger.handlers) == 0:
handler = RotatingFileHandler(
os.path.join(get_dir_from_dagster_home('logs'), 'event.log'),
maxBytes=MAX_BYTES,
backupCount=10,
)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
def write_telemetry_log_line(log_line):
logger = _get_telemetry_logger()
logger.info(json.dumps(log_line))
def _get_instance_telemetry_info():
dagster_telemetry_enabled = _get_instance_telemetry_enabled(DagsterInstance.get())
instance_id = None
if dagster_telemetry_enabled:
instance_id = _get_or_set_instance_id()
return (dagster_telemetry_enabled, instance_id)
def _get_instance_telemetry_enabled(instance):
return instance.telemetry_enabled
def _get_or_set_instance_id():
instance_id = _get_telemetry_instance_id()
    if instance_id is None:
instance_id = _set_telemetry_instance_id()
return instance_id
# Gets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml
def _get_telemetry_instance_id():
telemetry_id_path = os.path.join(get_dir_from_dagster_home(TELEMETRY_STR), 'id.yaml')
if not os.path.exists(telemetry_id_path):
return
with open(telemetry_id_path, 'r') as telemetry_id_file:
telemetry_id_yaml = yaml.safe_load(telemetry_id_file)
if INSTANCE_ID_STR in telemetry_id_yaml and isinstance(
telemetry_id_yaml[INSTANCE_ID_STR], six.string_types
):
return telemetry_id_yaml[INSTANCE_ID_STR]
return None
# Sets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml
def _set_telemetry_instance_id():
click.secho(TELEMETRY_TEXT)
click.secho(SLACK_PROMPT)
telemetry_id_path = os.path.join(get_dir_from_dagster_home(TELEMETRY_STR), 'id.yaml')
instance_id = str(uuid.uuid4())
try: # In case we encounter an error while writing to user's file system
with open(telemetry_id_path, 'w') as telemetry_id_file:
yaml.dump({INSTANCE_ID_STR: instance_id}, telemetry_id_file, default_flow_style=False)
return instance_id
except Exception: # pylint: disable=broad-except
return '<<unable_to_write_instance_id>>'
def hash_name(name):
return hashlib.sha256(name.encode('utf-8')).hexdigest()
def log_repo_stats(instance, source, pipeline=None, repo=None):
check.inst_param(instance, 'instance', DagsterInstance)
check.str_param(source, 'source')
check.opt_inst_param(pipeline, 'pipeline', ExecutablePipeline)
check.opt_inst_param(repo, 'repo', ReconstructableRepository)
if _get_instance_telemetry_enabled(instance):
instance_id = _get_or_set_instance_id()
if isinstance(pipeline, ReconstructablePipeline):
pipeline_name_hash = hash_name(pipeline.get_definition().name)
repository = pipeline.get_reconstructable_repository().get_definition()
repo_hash = hash_name(repository.name)
num_pipelines_in_repo = len(repository.pipeline_names)
elif isinstance(repo, ReconstructableRepository):
pipeline_name_hash = ''
repository = repo.get_definition()
repo_hash = hash_name(repository.name)
num_pipelines_in_repo = len(repository.pipeline_names)
else:
pipeline_name_hash = hash_name(pipeline.get_definition().name)
repo_hash = hash_name(EPHEMERAL_NAME)
num_pipelines_in_repo = 1
write_telemetry_log_line(
TelemetryEntry(
action=UPDATE_REPO_STATS,
client_time=str(datetime.datetime.now()),
event_id=str(uuid.uuid4()),
instance_id=instance_id,
pipeline_name_hash=pipeline_name_hash,
num_pipelines_in_repo=str(num_pipelines_in_repo),
repo_hash=repo_hash,
metadata={'source': source},
)._asdict()
)
def log_action(action, client_time=None, elapsed_time=None, metadata=None):
    # Resolve the client time per call; a datetime.datetime.now() default argument would be
    # evaluated only once, at import time.
    if client_time is None:
        client_time = datetime.datetime.now()
    (dagster_telemetry_enabled, instance_id) = _get_instance_telemetry_info()
if dagster_telemetry_enabled:
# Log general statistics
write_telemetry_log_line(
TelemetryEntry(
action=action,
client_time=str(client_time),
elapsed_time=str(elapsed_time),
event_id=str(uuid.uuid4()),
instance_id=instance_id,
metadata=metadata,
)._asdict()
)
TELEMETRY_TEXT = '''
%(telemetry)s
As an open source project, we collect usage statistics to inform development priorities. For more
information, read https://docs.dagster.io/install/telemetry.
We will not see or store solid definitions, pipeline definitions, modes, resources, context, or
any data that is processed within solids and pipelines.
To opt-out, add the following to $DAGSTER_HOME/dagster.yaml, creating that file if necessary:
telemetry:
enabled: false
''' % {
'telemetry': click.style('Telemetry:', fg='blue', bold=True)
}
SLACK_PROMPT = '''
%(welcome)s
If you have any questions or would like to engage with the Dagster team, please join us on Slack
(https://bit.ly/39dvSsF).
''' % {
'welcome': click.style('Welcome to Dagster!', bold=True)
}
def upload_logs(stop_event):
'''Upload logs to telemetry server every hour, or when log directory size is > 10MB'''
try:
last_run = datetime.datetime.now() - datetime.timedelta(minutes=120)
dagster_log_dir = get_dir_from_dagster_home('logs')
dagster_log_queue_dir = get_dir_from_dagster_home('.logs_queue')
in_progress = False
while not stop_event.is_set():
log_size = 0
if os.path.isdir(dagster_log_dir):
log_size = sum(
os.path.getsize(os.path.join(dagster_log_dir, f))
for f in os.listdir(dagster_log_dir)
if os.path.isfile(os.path.join(dagster_log_dir, f))
)
log_queue_size = 0
if os.path.isdir(dagster_log_queue_dir):
log_queue_size = sum(
os.path.getsize(os.path.join(dagster_log_queue_dir, f))
for f in os.listdir(dagster_log_queue_dir)
if os.path.isfile(os.path.join(dagster_log_queue_dir, f))
)
if log_size == 0 and log_queue_size == 0:
return
if not in_progress and (
datetime.datetime.now() - last_run > datetime.timedelta(minutes=60)
or log_size >= MAX_BYTES
or log_queue_size >= MAX_BYTES
):
in_progress = True # Prevent concurrent _upload_logs invocations
last_run = datetime.datetime.now()
dagster_log_dir = get_dir_from_dagster_home('logs')
dagster_log_queue_dir = get_dir_from_dagster_home('.logs_queue')
_upload_logs(dagster_log_dir, log_size, dagster_log_queue_dir)
in_progress = False
stop_event.wait(600) # Sleep for 10 minutes
except Exception: # pylint: disable=broad-except
pass
def _upload_logs(dagster_log_dir, log_size, dagster_log_queue_dir):
'''Send POST request to telemetry server with the contents of $DAGSTER_HOME/logs/ directory '''
try:
if log_size > 0:
# Delete contents of dagster_log_queue_dir so that new logs can be copied over
for f in os.listdir(dagster_log_queue_dir):
# Todo: there is probably a way to try to upload these logs without introducing
# too much complexity...
os.remove(os.path.join(dagster_log_queue_dir, f))
os.rename(dagster_log_dir, dagster_log_queue_dir)
for curr_path in os.listdir(dagster_log_queue_dir):
curr_full_path = os.path.join(dagster_log_queue_dir, curr_path)
retry_num = 0
max_retries = 3
success = False
while not success and retry_num <= max_retries:
with open(curr_full_path, 'rb') as curr_file:
byte = curr_file.read()
data = zlib.compress(byte, zlib.Z_BEST_COMPRESSION)
headers = {'content-encoding': 'gzip'}
r = requests.post(DAGSTER_TELEMETRY_URL, data=data, headers=headers)
if r.status_code == 200:
success = True
retry_num += 1
if success:
os.remove(curr_full_path)
except Exception: # pylint: disable=broad-except
pass
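# Illustrative wiring (sketch of an assumed caller, not part of this module):
# upload_logs is designed to run on a background thread and to stop when a
# threading.Event is set.
#   import threading
#   stop_event = threading.Event()
#   threading.Thread(target=upload_logs, args=(stop_event,), daemon=True).start()
#   ...
#   stop_event.set()  # asks the loop inside upload_logs to exit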
|
the-stack_106_21332
|
from unittest import mock
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy as _
from django_dynamic_fixture import get
from textclassifier.validators import ClassifierValidator
from readthedocs.builds.constants import LATEST, STABLE, EXTERNAL
from readthedocs.builds.models import Version
from readthedocs.projects.constants import (
PRIVATE,
PRIVACY_CHOICES,
PROTECTED,
PUBLIC,
REPO_TYPE_GIT,
REPO_TYPE_HG,
)
from readthedocs.projects.exceptions import ProjectSpamError
from readthedocs.projects.forms import (
EmailHookForm,
EnvironmentVariableForm,
ProjectAdvancedForm,
ProjectBasicsForm,
ProjectExtraForm,
TranslationForm,
UpdateProjectForm,
WebHookForm,
)
from readthedocs.projects.models import EnvironmentVariable, Project
class TestProjectForms(TestCase):
@mock.patch.object(ClassifierValidator, '__call__')
def test_form_spam(self, mocked_validator):
"""Form description field fails spam validation."""
mocked_validator.side_effect = ProjectSpamError
data = {
'description': 'foo',
'documentation_type': 'sphinx',
'language': 'en',
}
form = ProjectExtraForm(data)
with self.assertRaises(ProjectSpamError):
form.is_valid()
def test_import_repo_url(self):
"""Validate different type of repository URLs on importing a Project."""
common_urls = [
# Invalid
('./path/to/relative/folder', False),
('../../path/to/relative/folder', False),
('../../path/to/@/folder', False),
('/path/to/local/folder', False),
('/path/to/@/folder', False),
('file:///path/to/local/folder', False),
('file:///path/to/@/folder', False),
('github.com/humitos/foo', False),
('https://github.com/|/foo', False),
('git://github.com/&&/foo', False),
# Valid
('git://github.com/humitos/foo', True),
('http://github.com/humitos/foo', True),
('https://github.com/humitos/foo', True),
('http://gitlab.com/humitos/foo', True),
('http://bitbucket.com/humitos/foo', True),
('ftp://ftpserver.com/humitos/foo', True),
('ftps://ftpserver.com/humitos/foo', True),
('lp:zaraza', True),
]
public_urls = [
('[email protected]:humitos/foo', False),
('ssh://[email protected]/humitos/foo', False),
('ssh+git://github.com/humitos/foo', False),
('[email protected]:strangeuser/readthedocs.git', False),
('[email protected]:22/_ssh/docs', False),
] + common_urls
private_urls = [
('[email protected]:humitos/foo', True),
('ssh://[email protected]/humitos/foo', True),
('ssh+git://github.com/humitos/foo', True),
('[email protected]:strangeuser/readthedocs.git', True),
('[email protected]:22/_ssh/docs', True),
] + common_urls
with override_settings(ALLOW_PRIVATE_REPOS=False):
for url, valid in public_urls:
initial = {
'name': 'foo',
'repo_type': 'git',
'repo': url,
}
form = ProjectBasicsForm(initial)
self.assertEqual(form.is_valid(), valid, msg=url)
with override_settings(ALLOW_PRIVATE_REPOS=True):
for url, valid in private_urls:
initial = {
'name': 'foo',
'repo_type': 'git',
'repo': url,
}
form = ProjectBasicsForm(initial)
self.assertEqual(form.is_valid(), valid, msg=url)
def test_empty_slug(self):
initial = {
'name': "''",
'repo_type': 'git',
'repo': 'https://github.com/user/repository',
}
form = ProjectBasicsForm(initial)
self.assertFalse(form.is_valid())
self.assertIn('name', form.errors)
def test_changing_vcs_should_change_latest(self):
"""When changing the project's VCS, latest should be changed too."""
project = get(Project, repo_type=REPO_TYPE_HG, default_branch=None)
latest = project.versions.get(slug=LATEST)
self.assertEqual(latest.identifier, 'default')
form = ProjectBasicsForm(
{
'repo': 'http://github.com/test/test',
'name': 'name',
'repo_type': REPO_TYPE_GIT,
},
instance=project,
)
self.assertTrue(form.is_valid())
form.save()
latest.refresh_from_db()
self.assertEqual(latest.identifier, 'master')
def test_changing_vcs_should_not_change_latest_is_not_none(self):
"""
When changing the project's VCS,
we should respect the custom default branch.
"""
project = get(Project, repo_type=REPO_TYPE_HG, default_branch='custom')
latest = project.versions.get(slug=LATEST)
self.assertEqual(latest.identifier, 'custom')
form = ProjectBasicsForm(
{
'repo': 'http://github.com/test/test',
'name': 'name',
'repo_type': REPO_TYPE_GIT,
},
instance=project,
)
self.assertTrue(form.is_valid())
form.save()
latest.refresh_from_db()
self.assertEqual(latest.identifier, 'custom')
def test_length_of_tags(self):
data = {
'documentation_type': 'sphinx',
'language': 'en',
}
data['tags'] = '{},{}'.format('a'*50, 'b'*99)
form = ProjectExtraForm(data)
self.assertTrue(form.is_valid())
data['tags'] = '{},{}'.format('a'*90, 'b'*100)
form = ProjectExtraForm(data)
self.assertTrue(form.is_valid())
data['tags'] = '{},{}'.format('a'*99, 'b'*101)
form = ProjectExtraForm(data)
self.assertFalse(form.is_valid())
self.assertTrue(form.has_error('tags'))
error_msg = 'Length of each tag must be less than or equal to 100 characters.'
self.assertDictEqual(form.errors, {'tags': [error_msg]})
def test_strip_repo_url(self):
form = ProjectBasicsForm({
'name': 'foo',
'repo_type': 'git',
'repo': 'https://github.com/rtfd/readthedocs.org/'
})
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data['repo'],
'https://github.com/rtfd/readthedocs.org'
)
class TestProjectAdvancedForm(TestCase):
def setUp(self):
self.project = get(Project)
get(
Version,
project=self.project,
slug='public-1',
active=True,
privacy_level=PUBLIC,
identifier='public-1',
verbose_name='public-1',
)
get(
Version,
project=self.project,
slug='public-2',
active=True,
privacy_level=PUBLIC,
identifier='public-2',
verbose_name='public-2',
)
get(
Version,
project=self.project,
slug='public-3',
active=False,
privacy_level=PROTECTED,
identifier='public-3',
verbose_name='public-3',
)
get(
Version,
project=self.project,
slug='public-4',
active=False,
privacy_level=PUBLIC,
identifier='public/4',
verbose_name='public/4',
)
get(
Version,
project=self.project,
slug='private',
active=True,
privacy_level=PRIVATE,
identifier='private',
verbose_name='private',
)
get(
Version,
project=self.project,
slug='protected',
active=True,
privacy_level=PROTECTED,
identifier='protected',
verbose_name='protected',
)
def test_list_only_active_versions_on_default_version(self):
form = ProjectAdvancedForm(instance=self.project)
# This version is created automatically by the project on save
self.assertTrue(self.project.versions.filter(slug=LATEST).exists())
self.assertEqual(
{
slug
for slug, _ in form.fields['default_version'].widget.choices
},
{'latest', 'public-1', 'public-2', 'private', 'protected'},
)
def test_default_version_field_if_no_active_version(self):
project_1 = get(Project)
project_1.versions.filter(active=True).update(active=False)
# No active versions of project exists
self.assertFalse(project_1.versions.filter(active=True).exists())
form = ProjectAdvancedForm(instance=project_1)
self.assertTrue(form.fields['default_version'].widget.attrs['readonly'])
self.assertEqual(form.fields['default_version'].initial, 'latest')
def test_hide_protected_privacy_level_new_objects(self):
"""
Test PROTECTED is only allowed in old objects.
New projects are not allowed to set the privacy level as protected.
"""
# New default object
project = get(Project)
form = ProjectAdvancedForm(instance=project)
privacy_choices = list(PRIVACY_CHOICES)
privacy_choices.remove((PROTECTED, _('Protected')))
self.assertEqual(form.fields['privacy_level'].choices, privacy_choices)
# "Old" object with privacy_level previously set as protected
project = get(
Project,
privacy_level=PROTECTED,
)
form = ProjectAdvancedForm(instance=project)
self.assertEqual(form.fields['privacy_level'].choices, list(PRIVACY_CHOICES))
class TestProjectAdvancedFormDefaultBranch(TestCase):
def setUp(self):
self.project = get(Project)
user_created_stable_version = get(
Version,
project=self.project,
slug='stable',
active=True,
privacy_level=PUBLIC,
identifier='ab96cbff71a8f40a4340aaf9d12e6c10',
verbose_name='stable',
)
get(
Version,
project=self.project,
slug='public-1',
active=True,
privacy_level=PUBLIC,
identifier='public-1',
verbose_name='public-1',
)
get(
Version,
project=self.project,
slug='private',
active=True,
privacy_level=PRIVATE,
identifier='private',
verbose_name='private',
)
get(
Version,
project=self.project,
slug='protected',
active=True,
privacy_level=PROTECTED,
identifier='protected',
verbose_name='protected',
)
def test_list_only_non_auto_generated_versions_in_default_branch_choices(self):
form = ProjectAdvancedForm(instance=self.project)
# This version is created automatically by the project on save
latest = self.project.versions.filter(slug=LATEST)
self.assertTrue(latest.exists())
# show only the versions that are not auto generated as choices
self.assertEqual(
{
identifier
for identifier, _ in form.fields['default_branch'].widget.choices
},
{
None, 'stable', 'public-1', 'protected', 'private',
},
)
# Auto generated version `latest` should not be among the choices
self.assertNotIn(
latest.first().verbose_name,
[identifier for identifier, _ in form.fields[
'default_branch'].widget.choices],
)
def test_list_user_created_latest_and_stable_versions_in_default_branch_choices(self):
self.project.versions.filter(slug=LATEST).first().delete()
user_created_latest_version = get(
Version,
project=self.project,
slug='latest',
active=True,
privacy_level=PUBLIC,
identifier='ab96cbff71a8f40a4240aaf9d12e6c10',
verbose_name='latest',
)
form = ProjectAdvancedForm(instance=self.project)
# This version is created by the user
latest = self.project.versions.filter(slug=LATEST)
# This version is created by the user
stable = self.project.versions.filter(slug=STABLE)
self.assertIn(
latest.first().verbose_name,
[identifier for identifier, _ in form.fields[
'default_branch'].widget.choices],
)
self.assertIn(
stable.first().verbose_name,
[identifier for identifier, _ in form.fields[
'default_branch'].widget.choices],
)
def test_commit_name_not_in_default_branch_choices(self):
form = ProjectAdvancedForm(instance=self.project)
# This version is created by the user
latest = self.project.versions.filter(slug=LATEST)
# This version is created by the user
stable = self.project.versions.filter(slug=STABLE)
# `commit_name` can not be used as the value for the choices
self.assertNotIn(
latest.first().commit_name,
[identifier for identifier, _ in form.fields[
'default_branch'].widget.choices],
)
self.assertNotIn(
stable.first().commit_name,
[identifier for identifier, _ in form.fields[
'default_branch'].widget.choices],
)
def test_external_version_not_in_default_branch_choices(self):
external_version = get(
Version,
identifier='pr-version',
verbose_name='pr-version',
slug='pr-9999',
project=self.project,
active=True,
type=EXTERNAL,
privacy_level=PUBLIC,
)
form = ProjectAdvancedForm(instance=self.project)
self.assertNotIn(
external_version.verbose_name,
[identifier for identifier, _ in form.fields[
'default_branch'].widget.choices],
)
class TestTranslationForms(TestCase):
def setUp(self):
self.user_a = get(User)
self.project_a_es = self.get_project(lang='es', users=[self.user_a])
self.project_b_en = self.get_project(lang='en', users=[self.user_a])
self.project_c_br = self.get_project(lang='br', users=[self.user_a])
self.project_d_ar = self.get_project(lang='ar', users=[self.user_a])
self.project_e_en = self.get_project(lang='en', users=[self.user_a])
self.user_b = get(User)
self.project_f_ar = self.get_project(lang='ar', users=[self.user_b])
self.project_g_ga = self.get_project(lang='ga', users=[self.user_b])
self.project_s_fr = self.get_project(
lang='fr',
users=[self.user_b, self.user_a],
)
def get_project(self, lang, users, **kwargs):
return get(
Project, language=lang, users=users,
main_language_project=None, **kwargs
)
def test_list_only_owner_projects(self):
form = TranslationForm(
{'project': self.project_b_en.slug},
parent=self.project_a_es,
user=self.user_a,
)
self.assertTrue(form.is_valid())
expected_projects = [
self.project_b_en,
self.project_c_br,
self.project_d_ar,
self.project_e_en,
self.project_s_fr,
]
self.assertEqual(
{proj_slug for proj_slug, _ in form.fields['project'].choices},
{project.slug for project in expected_projects},
)
form = TranslationForm(
{'project': self.project_g_ga.slug},
parent=self.project_f_ar,
user=self.user_b,
)
self.assertTrue(form.is_valid())
expected_projects = [
self.project_g_ga,
self.project_s_fr,
]
self.assertEqual(
{proj_slug for proj_slug, _ in form.fields['project'].choices},
{project.slug for project in expected_projects},
)
def test_excludes_existing_translations(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.translations.add(self.project_c_br)
self.project_a_es.save()
form = TranslationForm(
{'project': self.project_d_ar.slug},
parent=self.project_a_es,
user=self.user_a,
)
self.assertTrue(form.is_valid())
expected_projects = [
self.project_d_ar,
self.project_e_en,
self.project_s_fr,
]
self.assertEqual(
{proj_slug for proj_slug, _ in form.fields['project'].choices},
{project.slug for project in expected_projects},
)
def test_user_cant_add_other_user_project(self):
form = TranslationForm(
{'project': self.project_f_ar.slug},
parent=self.project_b_en,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'Select a valid choice',
''.join(form.errors['project']),
)
self.assertNotIn(
self.project_f_ar,
[proj_slug for proj_slug, _ in form.fields['project'].choices],
)
def test_user_cant_add_project_with_same_lang(self):
form = TranslationForm(
{'project': self.project_b_en.slug},
parent=self.project_e_en,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'Both projects can not have the same language (English).',
''.join(form.errors['project']),
)
def test_user_cant_add_project_with_same_lang_of_other_translation(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.save()
form = TranslationForm(
{'project': self.project_e_en.slug},
parent=self.project_a_es,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'This project already has a translation for English.',
''.join(form.errors['project']),
)
def test_no_nesting_translation(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.save()
form = TranslationForm(
{'project': self.project_b_en.slug},
parent=self.project_c_br,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'Select a valid choice',
''.join(form.errors['project']),
)
def test_no_nesting_translation_case_2(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.save()
form = TranslationForm(
{'project': self.project_a_es.slug},
parent=self.project_c_br,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'A project with existing translations can not',
''.join(form.errors['project']),
)
def test_not_already_translation(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.save()
form = TranslationForm(
{'project': self.project_c_br.slug},
parent=self.project_b_en,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'is already a translation',
''.join(form.errors['project']),
)
def test_cant_change_language_to_translation_lang(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.translations.add(self.project_c_br)
self.project_a_es.save()
# Parent project tries to change lang
form = UpdateProjectForm(
{
'documentation_type': 'sphinx',
'language': 'en',
},
instance=self.project_a_es,
)
self.assertFalse(form.is_valid())
self.assertIn(
'There is already a "en" translation',
''.join(form.errors['language']),
)
# Translation tries to change lang
form = UpdateProjectForm(
{
'documentation_type': 'sphinx',
'language': 'es',
},
instance=self.project_b_en,
)
self.assertFalse(form.is_valid())
self.assertIn(
'There is already a "es" translation',
''.join(form.errors['language']),
)
# Translation tries to change lang
# to the same as its sibling
form = UpdateProjectForm(
{
'documentation_type': 'sphinx',
'language': 'br',
},
instance=self.project_b_en,
)
self.assertFalse(form.is_valid())
self.assertIn(
'There is already a "br" translation',
''.join(form.errors['language']),
)
def test_can_change_language_to_self_lang(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.translations.add(self.project_c_br)
self.project_a_es.save()
# Parent project tries to change lang
form = UpdateProjectForm(
{
'repo': 'https://github.com/test/test',
'repo_type': self.project_a_es.repo_type,
'name': self.project_a_es.name,
'documentation_type': 'sphinx',
'language': 'es',
},
instance=self.project_a_es,
)
self.assertTrue(form.is_valid())
# Translation tries to change lang
form = UpdateProjectForm(
{
'repo': 'https://github.com/test/test',
'repo_type': self.project_b_en.repo_type,
'name': self.project_b_en.name,
'documentation_type': 'sphinx',
'language': 'en',
},
instance=self.project_b_en,
)
self.assertTrue(form.is_valid())
class TestNotificationForm(TestCase):
def setUp(self):
self.project = get(Project)
def test_webhookform(self):
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
'url': 'http://www.example.com/'
}
form = WebHookForm(data=data, project=self.project)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.webhook_notifications.all().count(), 1)
data = {
'url': 'https://www.example.com/'
}
form = WebHookForm(data=data, project=self.project)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.webhook_notifications.all().count(), 2)
def test_wrong_inputs_in_webhookform(self):
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
'url': ''
}
form = WebHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {'url': ['This field is required.']})
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
'url': 'wrong-url'
}
form = WebHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {'url': ['Enter a valid URL.']})
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
def test_emailhookform(self):
self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
data = {
'email': '[email protected]'
}
form = EmailHookForm(data=data, project=self.project)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.emailhook_notifications.all().count(), 1)
def test_wrong_inputs_in_emailhookform(self):
self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
data = {
'email': 'wrong_email@'
}
form = EmailHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {'email': ['Enter a valid email address.']})
self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
data = {
'email': ''
}
form = EmailHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {'email': ['This field is required.']})
self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
class TestProjectEnvironmentVariablesForm(TestCase):
def setUp(self):
self.project = get(Project)
def test_use_invalid_names(self):
data = {
'name': 'VARIABLE WITH SPACES',
'value': 'string here',
}
form = EnvironmentVariableForm(data, project=self.project)
self.assertFalse(form.is_valid())
self.assertIn(
"Variable name can't contain spaces",
form.errors['name'],
)
data = {
'name': 'READTHEDOCS__INVALID',
'value': 'string here',
}
form = EnvironmentVariableForm(data, project=self.project)
self.assertFalse(form.is_valid())
self.assertIn(
"Variable name can't start with READTHEDOCS",
form.errors['name'],
)
data = {
'name': 'INVALID_CHAR*',
'value': 'string here',
}
form = EnvironmentVariableForm(data, project=self.project)
self.assertFalse(form.is_valid())
self.assertIn(
'Only letters, numbers and underscore are allowed',
form.errors['name'],
)
data = {
'name': '__INVALID',
'value': 'string here',
}
form = EnvironmentVariableForm(data, project=self.project)
self.assertFalse(form.is_valid())
self.assertIn(
"Variable name can't start with __ (double underscore)",
form.errors['name'],
)
get(EnvironmentVariable, name='EXISTENT_VAR', project=self.project)
data = {
'name': 'EXISTENT_VAR',
'value': 'string here',
}
form = EnvironmentVariableForm(data, project=self.project)
self.assertFalse(form.is_valid())
self.assertIn(
'There is already a variable with this name for this project',
form.errors['name'],
)
def test_create(self):
data = {
'name': 'MYTOKEN',
'value': 'string here',
}
form = EnvironmentVariableForm(data, project=self.project)
form.save()
self.assertEqual(EnvironmentVariable.objects.count(), 1)
self.assertEqual(EnvironmentVariable.objects.first().name, 'MYTOKEN')
self.assertEqual(EnvironmentVariable.objects.first().value, "'string here'")
data = {
'name': 'ESCAPED',
'value': r'string escaped here: #$\1[]{}\|',
}
form = EnvironmentVariableForm(data, project=self.project)
form.save()
self.assertEqual(EnvironmentVariable.objects.count(), 2)
self.assertEqual(EnvironmentVariable.objects.first().name, 'ESCAPED')
self.assertEqual(EnvironmentVariable.objects.first().value, r"'string escaped here: #$\1[]{}\|'")
|
the-stack_106_21333
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'reconchess'
copyright = '2018, Corey Lowman, Casey Richardson'
author = 'Corey Lowman, Casey Richardson'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinxcontrib.httpdomain',
]
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'chess': ('https://python-chess.readthedocs.io/en/latest/', None),
}
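# Illustrative cross-reference (sketch, not generated by sphinx-quickstart): with
# the mapping above, docstrings and .rst pages can link into the python-chess
# documentation, e.g. :class:`chess.Board` resolves against
# https://python-chess.readthedocs.io/en/latest/ at build time.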
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'reconchessdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'reconchess.tex', 'reconchess Documentation',
'Corey Lowman, Casey Richardson', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'reconchess', 'reconchess Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'reconchess', 'reconchess Documentation',
author, 'reconchess', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
the-stack_106_21334
|
# Homework Assignment 06
# from Aishwarya Shirbhate 20005242
# Pseudocode #
# Define input_check function with check, answer, number of wins and history input
# In the function
# {
# Initialise variables maximum attempts and user inputs.
# Define symbol list which needs to be excluded from the user input.
# While maximum attempts < 6,
# ask for user input.
# if length of user input = 0
# print "You have now ended the game\n Thank you for playing Wordle\n "
# call gameStats function
# for i in history[i] = 0
# update check as exit
# break
# if length of user input is 5,
# if symbols not in user input
# if user input has not been used in a previous guess,
# call wordGuesser function
# else print prompt "Provided word is used previously. Please enter a new word"
# else print prompt "Input word contains symbols/characters. Please input words with alphabets only"
# else print prompt "Input word length needs to be 5 alphabets long"
#
# if all alphabets are in correct order,
# game is over. print "Woah! You guessed it right in attempt" with maximum attempts used to the user
# if maximum_guesses in number_of_wins:
# number_of_wins[maximum_guesses] += 1
# else:
# number_of_wins[maximum_guesses] = 1
#
# update history[won] by 1
# print "Guess Distribution is:" and number_of_wins
# if length of user input = 0
# then print "you have now ended the game. Thank you for playing Wordle "
# print "Guess Distribution is:" and number_of_wins
# if input word is not same as given word
# then continue function till maximum attempts are <= 6
# else game is over. print "You have used up your guesses" and the correct word to the user.
# update history["loss"] by 1
# print "Guess Distribution is:" and number_of_wins
# call game_stats(history) function
# log user input, no of wins and answer
# return check, number_of_wins, history
# end
# }
# define gameStats function with history as input
# let lost_games = history["loss"]
# let won_games = history["won"]
# let total_games = won_games + lost_games
# let win_prec = (won_games/total_games) * 100
# print "total no of games won are:" and won_games
# print "total no of games lost are:" and lost_games
# print "winning percentage are:" and win_prec
# log total games and win perc
# define main wordle function
# in the main function {
# initialise check = "play"
# let number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
# let history = {"won": 0, "loss": 0}
# define valid words list
# while check = "play"
# call dictionary module and assign englishword to answer
# if answer is None
# raise Exception
# if answer is not in valid_words
# valid_words.append(answer)
# call input_check function
# print('*' * 100)
# if len(valid_words) == 1379 then
# valid_words = []
# end }
# call wordle function
import HW06_Aishwarya_Shirbhate_dictionary_final as Module_dictionary
import HW06_Aishwarya_Shirbhate_wordle_final as Module_wordle
import json
import sys
import logging
print("*** WORDLE GAME ***")
logging.basicConfig(filename='gameplay.log', filemode='a', format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def input_check(check, answer, number_of_wins, history):
"""
Checks user input is in proper format and implement word_guesser function.
:param history:
:param number_of_wins:
:param check:
:param answer:
:return check:
:return number_of_wins
:return history
"""
try:
        symbol_list = {'~', ':', "'", '+', '[', '\\', '@', '^', '{', '%', '(', '-',
                       '"', '*', '|', ',', '&', '<', '`', '}', '.', '_', '=', ']',
                       '!', '>', ';', '?', '#', '$', ')', '/', ' ',
                       '1', '2', '3', '4', '5', '6', '7', '8', '9',
                       '0'}  # symbol list that needs to be excluded
# array for user input
user_inputs = []
maximum_guesses = 0
correct_guess = False
# while loop for maximum guesses
while maximum_guesses < 6 and not correct_guess:
guess = input("Please enter a 5 letter word and press Enter:")
if len(guess) == 0:
print("You have now ended the game\n Thank you for playing Wordle\n ")
game_stats(history)
for i in history:
history[i] = 0
check = "exit"
break
symbol_check = [ele for ele in symbol_list if (ele in guess)]
bool_symbolcheck = bool(symbol_check)
if len(guess) == 5:
if bool_symbolcheck is False:
if guess not in user_inputs:
if guess in Module_dictionary.func_dict():
user_inputs.append(guess)
maximum_guesses += 1
lower_guess = guess.lower()
correct_guess = Module_wordle.wordGuesser(answer, lower_guess)[0] # calling above function
else:
print("please provide a valid dictionary word")
else:
print("Provided word is used previously. Please enter a new word")
else:
print("Input word contains symbols/characters. Please input words with alphabets only")
else:
print("Input word length needs to be 5 alphabets long")
if correct_guess:
print("Woah! You guessed it right in attempt", maximum_guesses)
if maximum_guesses in number_of_wins:
number_of_wins[maximum_guesses] += 1
else:
number_of_wins[maximum_guesses] = 1
history["won"] += 1
print("Guess Distribution is:", number_of_wins) # displays guess distribution of the game
game_stats(history)
elif len(guess) == 0:
print("Guess Distribution is:", number_of_wins) # displays guess distribution of the game
print("You have now ended the game\nThank you for playing Wordle\n ")
else:
print("You have used up your guesses")
print("The correct answer is", answer)
history["loss"] += 1
print("Guess Distribution is:", number_of_wins) # displays guess distribution of the game
game_stats(history)
logging.info("Input words: "+str(user_inputs))
logging.info("\nactual answer:\n" + str(answer))
logging.info("\nnumber of games won is: \n")
logging.info(str(number_of_wins))
return [check, number_of_wins, history]
except:
print("Error:", sys.exc_info(), " in input check function")
def game_stats(history):
"""
displays statistics for current session of the game
:param history:
"""
try:
lost_games = history["loss"]
won_games = history["won"]
total_games = won_games + lost_games
        print("total number of games played:", total_games)  # displays total number of games played
if total_games != 0:
win_prec = (won_games / total_games) * 100
            print("winning percentage is:", win_prec)  # displays winning percentage
logging.info("\n****************************************************************\n")
logging.info("Total games played: " + str(total_games))
logging.info("\nwin percentage: " + str(win_prec))
return total_games, win_prec
else:
            win_prec = 0
            print("winning percentage is:", win_prec)  # displays winning percentage
logging.info("\n****************************************************************\n")
logging.info("Total games played: " + str(total_games))
logging.info("\nwin percentage: " + str(win_prec))
return total_games, win_prec
except:
print("Error:", sys.exc_info(), " in game stats function")
def wordle():
"""
Checks for check value and executes wordle game by calling input_check function.
"""
try:
check = "play"
number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
valid_words = []
while check == "play":
answer = Module_dictionary.func_englishword() # assign english word to answer
            if answer is None:  # comparing type(answer) to the string 'NoneType' could never be true
raise Exception("not valid word") # raise exception if file is not present
if answer not in valid_words:
valid_words.append(answer)
check, number_of_wins, history = input_check(check, answer, number_of_wins, history)
print('*' * 100)
if len(valid_words) == 1379: # if all words are used reset the word list
valid_words = []
except:
print("Error:", sys.exc_info(), " in wordle function")
if __name__ == '__main__':
wordle()
|
the-stack_106_21335
|
from typing import Dict, Any, NamedTuple, Type, Iterable, Tuple
from ..models import (
GraphTaskModel,
NodeMulticlassTask,
GraphRegressionTask,
GraphBinaryClassificationTask,
QM9RegressionTask,
)
from ..data import GraphDataset, JsonLGraphPropertyDataset, QM9Dataset, PPIDataset
class TaskInfo(NamedTuple):
"""A named tuple to hold information about a task."""
name: str
dataset_class: Type[GraphDataset]
dataset_default_hypers: Dict[str, Any]
model_class: Type[GraphTaskModel]
model_default_hypers: Dict[str, Any]
TASK_NAME_TO_DATASET_AND_MODEL_INFO: Dict[str, TaskInfo] = {}
def register_task(
task_name, dataset_class, dataset_default_hypers, model_class, model_default_hypers
):
TASK_NAME_TO_DATASET_AND_MODEL_INFO[task_name.lower()] = TaskInfo(
name=task_name,
dataset_class=dataset_class,
dataset_default_hypers=dataset_default_hypers,
model_class=model_class,
model_default_hypers=model_default_hypers,
)
def clear_known_tasks() -> None:
TASK_NAME_TO_DATASET_AND_MODEL_INFO.clear()
def get_known_tasks() -> Iterable[str]:
for task_info in TASK_NAME_TO_DATASET_AND_MODEL_INFO.values():
yield task_info.name
def task_name_to_dataset_class(name: str) -> Tuple[Type[GraphDataset], Dict[str, Any]]:
"""
Map task name to a dataset class and default hyperparameters for that class.
"""
task_info = TASK_NAME_TO_DATASET_AND_MODEL_INFO.get(name.lower())
if task_info is None:
raise ValueError("Unknown task type '%s'" % name)
return task_info.dataset_class, task_info.dataset_default_hypers
def task_name_to_model_class(name: str) -> Tuple[Type[GraphTaskModel], Dict[str, Any]]:
"""
Map task name to a model class and default hyperparameters for that class.
"""
task_info = TASK_NAME_TO_DATASET_AND_MODEL_INFO.get(name.lower())
if task_info is None:
raise ValueError("Unknown task type '%s'" % name)
return task_info.model_class, task_info.model_default_hypers
# Register some default tasks:
register_task(
task_name="PPI",
dataset_class=PPIDataset,
dataset_default_hypers={},
model_class=NodeMulticlassTask,
model_default_hypers={},
)
register_task(
task_name="QM9",
dataset_class=QM9Dataset,
dataset_default_hypers={},
model_class=QM9RegressionTask,
model_default_hypers={},
)
register_task(
task_name="GraphRegression",
dataset_class=JsonLGraphPropertyDataset,
dataset_default_hypers={"threshold_for_classification": None},
model_class=GraphRegressionTask,
model_default_hypers={},
)
register_task(
task_name="GraphBinaryClassification",
dataset_class=JsonLGraphPropertyDataset,
dataset_default_hypers={"threshold_for_classification": 23.0},
model_class=GraphBinaryClassificationTask,
model_default_hypers={},
)
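# Illustrative lookup (sketch of assumed caller code, not part of this module);
# task names are matched case-insensitively, so "QM9", "qm9" and "Qm9" all resolve.
#   dataset_cls, dataset_hypers = task_name_to_dataset_class("qm9")
#   model_cls, model_hypers = task_name_to_model_class("qm9")
#   # constructing the dataset/model from these classes is dataset-specific and
#   # the constructor signatures are not shown here.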
|
the-stack_106_21336
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='ReVal',
version='v0.7.0',
packages=find_packages(),
include_package_data=True,
license='CC0-1.0',
description='Django app to upload, validate, review, and accept data files',
long_description=README,
url='https://github.com/18F/ReVAL',
author='18F',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'License :: Public Domain',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication'
],
install_requires=[
'django>=2.0,<3.0',
'djangorestframework',
'dj-database-url',
'goodtables',
'json_logic_qubit',
'psycopg2-binary',
'pyyaml',
'requests',
],
)
|
the-stack_106_21337
|
import unittest
from healthvaultlib.tests import settings
from healthvaultlib.helpers.connection import Connection
from healthvaultlib.exceptions.healthserviceexception import HealthServiceException
from healthvaultlib.methods.newapplicationcreationinfo import NewApplicationCreationInfo
class TestNewApplicationCreationInfo(unittest.TestCase):
def setUp(self):
self.connection = Connection(settings.SODA_MASTER_APPID, settings.HV_SERVICE_SERVER)
def test_newapplicationcreationinfo(self):
method = NewApplicationCreationInfo()
method.execute(self.connection)
response = method.response
self.assertIsNotNone(response.app_id)
self.assertIsNotNone(response.app_token)
self.assertIsNotNone(response.shared_secret)
def test_newapplicationcreationinfo_nonmaster(self):
method = NewApplicationCreationInfo()
self.connection.applicationid = settings.HV_APPID
with self.assertRaises(HealthServiceException):
method.execute(self.connection)
|
the-stack_106_21338
|
#!/usr/bin/env python
# pypreprocessor.py
__author__ = 'Evan Plaice'
__coauthor__ = 'Hendi O L, Epikem, num0005, tcumby'
__version__ = '0.7.9'
import sys
import os
import traceback
import imp
import io
class preprocessor:
def __init__(self, inFile=sys.argv[0], outFile='', defines=[], \
removeMeta=False, escapeChar=None, mode=None, escape='#', \
run=True, resume=False, save=True):
# public variables
self.defines = defines
self.input = inFile
self.output = outFile
self.removeMeta = removeMeta
self.escapeChar = escapeChar
self.mode = mode
self.escape = escape
self.run = run
self.resume = resume
self.save = save
self.readEncoding = sys.stdin.encoding if sys.stdin else None
        self.writeEncoding = sys.stdout.encoding if sys.stdout else None
# private variables
self.__linenum = 0
self.__excludeblock = False
self.__ifblocks = []
self.__ifconditions = []
self.__evalsquelch = True
self.__outputBuffer = ''
def check_deprecation(self):
def deprecation(message):
import warnings
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(message, DeprecationWarning)
warnings.simplefilter('default', DeprecationWarning)
if self.escapeChar != None:
deprecation("'pypreprocessor.escapeChar' is deprecated. Use 'escape' instead.")
if self.escape == '#':
self.escape = self.escapeChar
if self.mode != None:
msg = "'pypreprocessor.mode' is deprecated. Use 'run/resume/save' options instead."
if self.run != True or self.resume != False or self.save != True:
msg += " Ignoring 'pypreprocessor.mode'."
else:
if self.mode.lower() == 'run':
self.run = True
self.resume = False
self.save = False
elif self.mode.lower() == 'pp':
self.run = False
self.resume = False
self.save = True
elif self.mode.lower() == 'ppcont':
self.run = False
self.resume = True
self.save = True
elif self.mode is not None:
print('Unknown mode : ' + str(self.mode))
deprecation(msg)
# reseting internal things to parse a second file
def __reset_internal(self):
self.__linenum = 0
self.__excludeblock = False
self.__ifblocks = []
self.__ifconditions = []
self.__evalsquelch = True
self.__outputBuffer = ''
# the #define directive
def define(self, define):
self.defines.append(define)
# the #undef directive
def undefine(self, define):
# re-map the defines list excluding the define specified in the args
self.defines[:] = [x for x in self.defines if x != define]
# search: if define is defined
def search_defines(self, define):
if define in self.defines:
return True
else:
return False
#returning: validness of #ifdef #else block
def __if(self):
value = bool(self.__ifblocks)
for ib in self.__ifblocks:
            value *= ib  # '*' acts as a logical and here: value = value and ib
        return not value  # negated because True means the line gets removed
# evaluate
def lexer(self, line):
# strip any and all leading whitespace characters
line = line.lstrip()
# return values are (squelch, metadata)
if not (self.__ifblocks or self.__excludeblock):
if 'pypreprocessor.parse()' in line:
return True, True
#this block only for faster processing (not necessary)
elif line[:len(self.escape)] != self.escape:
return False, False
# handle #define directives
if line[:len(self.escape) + 6] == self.escape + 'define':
if len(line.split()) != 2:
self.exit_error(self.escape + 'define')
else:
self.define(line.split()[1])
return False, True
# handle #undef directives
elif line[:len(self.escape) + 5] == self.escape + 'undef':
if len(line.split()) != 2:
self.exit_error(self.escape + 'undef')
else:
self.undefine(line.split()[1])
return False, True
# handle #exclude directives
elif line[:len(self.escape) + 7] == self.escape + 'exclude':
if len(line.split()) != 1:
self.exit_error(self.escape + 'exclude')
else:
self.__excludeblock = True
return False, True
# handle #endexclude directives
elif line[:len(self.escape) + 10] == self.escape + 'endexclude':
if len(line.split()) != 1:
self.exit_error(self.escape + 'endexclude')
else:
self.__excludeblock = False
return False, True
# handle #ifnotdef directives (is the same as: #ifdef X #else)
elif line[:len(self.escape) + 8] == self.escape + 'ifdefnot':
if len(line.split()) != 2:
self.exit_error(self.escape + 'ifdefnot')
else:
self.__ifblocks.append(not self.search_defines(line.split()[1]))
self.__ifconditions.append(line.split()[1])
return False, True
# handle #ifdef directives
elif line[:len(self.escape) + 5] == self.escape + 'ifdef':
if len(line.split()) != 2:
self.exit_error(self.escape + 'ifdef')
else:
self.__ifblocks.append(self.search_defines(line.split()[1]))
self.__ifconditions.append(line.split()[1])
return False, True
# handle #else...
# handle #elseif directives
elif line[:len(self.escape) + 6] == self.escape + 'elseif':
if len(line.split()) != 2:
self.exit_error(self.escape + 'elseif')
else:
self.__ifblocks[-1] = not self.__ifblocks[-1]
#self.search_defines(self.__ifconditions[-1]))
self.__ifblocks.append(self.search_defines(line.split()[1]))
self.__ifconditions.append(line.split()[1])
return False, True
# handle #else directives
elif line[:len(self.escape) + 4] == self.escape + 'else':
if len(line.split()) != 1:
self.exit_error(self.escape + 'else')
else:
self.__ifblocks[-1] = not self.__ifblocks[-1]
#self.search_defines(self.__ifconditions[-1]))
return False, True
# handle #endif..
# handle #endififdef
elif line[:len(self.escape) + 10] == self.escape + 'endififdef':
if len(line.split()) != 2:
self.exit_error(self.escape + 'endififdef')
else:
if len(self.__ifconditions) >= 1:
self.__ifblocks.pop(-1)
self.__ifcondition = self.__ifconditions.pop(-1)
else:
self.__ifblocks = []
self.__ifconditions = []
self.__ifblocks.append(self.search_defines(line.split()[1]))
self.__ifconditions.append(line.split()[1])
return False, True
# handle #endifall directives
elif line[:len(self.escape) + 8] == self.escape + 'endifall':
if len(line.split()) != 1:
self.exit_error(self.escape + 'endifall')
else:
self.__ifblocks = []
self.__ifconditions = []
return False, True
# handle #endif and #endif numb directives
elif line[:len(self.escape) + 5] == self.escape + 'endif':
if len(line.split()) != 1:
self.exit_error(self.escape + 'endif number')
else:
try:
                    number = int(line[len(self.escape) + 5:])  # strip the escape + 'endif' prefix rather than assuming it is 6 chars
except ValueError as VE:
#print('ValueError',VE)
#self.exit_error(self.escape + 'endif number')
number = 1
if len(self.__ifconditions) > number:
for i in range(0, number):
self.__ifblocks.pop(-1)
self.__ifcondition = self.__ifconditions.pop(-1)
elif len(self.__ifconditions) == number:
self.__ifblocks = []
self.__ifconditions = []
else:
print('Warning try to remove more blocks than present', \
self.input, self.__linenum)
self.__ifblocks = []
self.__ifconditions = []
return False, True
else: #No directive --> execute
# process the excludeblock
if self.__excludeblock is True:
return True, False
# process the ifblock
elif self.__ifblocks: # is True:
return self.__if(), False
#here can add other stuff for deleting comnments eg
else:
return False, False
# error handling
def exit_error(self, directive):
print('File: "' + self.input + '", line ' + str(self.__linenum))
print('SyntaxError: Invalid ' + directive + ' directive')
sys.exit(1)
def rewrite_traceback(self):
trace = traceback.format_exc().splitlines()
index = 0
for line in trace:
if index == (len(trace) - 2):
print(line.replace("<string>", self.input))
else:
print(line)
index += 1
# parsing/processing
def __parse(self):
self.__reset_internal()
self.check_deprecation()
# open the input file
input_file = io.open(os.path.join(self.input), 'r', encoding=self.readEncoding)
try:
# process the input file
for line in input_file:
self.__linenum += 1
# to squelch or not to squelch
squelch, metaData = self.lexer(line)
# process and output
if self.removeMeta is True:
if metaData is True or squelch is True:
continue
if squelch is True:
if metaData:
self.__outputBuffer += self.escape + line
else:
self.__outputBuffer += self.escape[0] + line
continue
if squelch is False:
self.__outputBuffer += line
continue
finally:
input_file.close()
#Warnings for unclosed #ifdef blocks
if self.__ifblocks:
print('Warning: Number of unclosed Ifdefblocks: ', len(self.__ifblocks))
print('Can cause unwished behaviour in the preprocessed code, preprocessor is safe')
try:
select = input('Do you want more Information? ')
except SyntaxError:
select = 'no'
select = select.lower()
if select in ('yes', 'true', 'y', '1'):
print('Name of input and output file: ', self.input, ' ', self.output)
for i, item in enumerate(self.__ifconditions):
if (item in self.defines) != self.__ifblocks[i]:
cond = ' else '
else:
cond = ' if '
print('Block:', item, ' is in condition: ', cond)
def parse(self):
self.__parse()
self.post_process()
def parse_to_string(self):
self.__parse()
return self.__outputBuffer
# post-processor
    def post_process(self):
        output_file = None
        try:
            # set file name
            if self.output == '':
                self.output = self.input[0:-len(self.input.split('.')[-1])-1]+'_out.'+self.input.split('.')[-1]
            # open file for output
            output_file = io.open(self.output, 'w', encoding=self.writeEncoding)
            # write post-processed code to file
            output_file.write(self.__outputBuffer)
        finally:
            # only close if io.open succeeded; otherwise the NameError raised here
            # would mask the original exception
            if output_file is not None:
                output_file.close()
if self.run:
# if this module is loaded as a library override the import
if imp.lock_held() is True:
self.override_import()
else:
self.on_the_fly()
if not self.save:
# remove tmp file
if os.path.exists(self.output):
os.remove(self.output)
if not self.resume:
# break execution so python doesn't
# run the rest of the pre-processed code
sys.exit(0)
# postprocessor - override an import
def override_import(self):
try:
moduleName = self.input.split('.')[0]
tmpModuleName = self.output.split('.')[0]
del sys.modules[moduleName]
sys.modules[tmpModuleName] = __import__(tmpModuleName)
sys.modules[moduleName] = __import__(tmpModuleName)
except:
self.rewrite_traceback()
finally:
# remove tmp (.py & .pyc) files
os.remove(self.output)
os.remove(self.output + 'c')
# postprocessor - on-the-fly execution
def on_the_fly(self):
try:
f = io.open(self.output, "r", encoding=self.readEncoding)
exec(f.read())
f.close()
except:
self.rewrite_traceback()
pypreprocessor = preprocessor()
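# Illustrative usage (a sketch inferred from the lexer above, not an authoritative
# example): a consuming script imports the shared `pypreprocessor` instance,
# registers defines, and calls parse(); lines after the parse() call are then kept
# or squelched according to #ifdef/#else/#endif directives written with the escape
# character (default '#').
#   from pypreprocessor import pypreprocessor
#   pypreprocessor.defines.append('debug')
#   pypreprocessor.parse()
#   #ifdef debug
#   print('running a debug build')
#   #endif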
|
the-stack_106_21341
|
# coding: utf-8
# http://www.matrix67.com/data/InvSqrt.pdf
import math
import struct
def change_format(ifmt, ofmt, a):
    # Reinterpret the raw bytes of `a` under a different struct format code,
    # e.g. 'f' -> 'i' reads a 32-bit float's bit pattern as a 32-bit integer.
    a = struct.pack(ifmt, a)
    a = struct.unpack(ofmt, a)
    return a[0]
def q_rsqrt(number):
threehalfs = 1.5
x2 = number * 0.5
y = number
    i = change_format('f', 'i', y)  # 'i' is always 4 bytes; native 'l' can be 8 bytes on 64-bit platforms
# evil floating point bit level hacking
i = 0x5f3759df - (i >> 1) # what the fuck?
    y = change_format('i', 'f', i)
y = y * (threehalfs - x2 * y * y) # 1st iteration
y = y * (threehalfs - x2 * y * y) # 2nd iteration, this can be removed
return y
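# Sketch of why this works (a reading of the code above, not from the original
# comments): viewing the float's bits as an integer makes halving the exponent a
# simple right shift, the magic constant 0x5f3759df corrects the bias of that
# trick, and each line of the form
#     y = y * (threehalfs - x2 * y * y)
# is one Newton-Raphson step on f(y) = 1/y**2 - number, refining y toward
# number ** -0.5.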
if __name__ == '__main__':
a = 2
print(q_rsqrt(a))
print(1 / math.sqrt(a))
|
the-stack_106_21343
|
"""Config flow for Ambiclimate."""
import logging
from aiohttp import web
import ambiclimate
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.network import get_url
from .const import (
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
DOMAIN,
STORAGE_KEY,
STORAGE_VERSION,
)
DATA_AMBICLIMATE_IMPL = "ambiclimate_flow_implementation"
_LOGGER = logging.getLogger(__name__)
@callback
def register_flow_implementation(hass, client_id, client_secret):
"""Register a ambiclimate implementation.
client_id: Client id.
client_secret: Client secret.
"""
hass.data.setdefault(DATA_AMBICLIMATE_IMPL, {})
hass.data[DATA_AMBICLIMATE_IMPL] = {
CONF_CLIENT_ID: client_id,
CONF_CLIENT_SECRET: client_secret,
}
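# Illustrative setup call (sketch of an assumed caller in the component's setup;
# the `conf` mapping is hypothetical):
#   register_flow_implementation(
#       hass,
#       conf[CONF_CLIENT_ID],
#       conf[CONF_CLIENT_SECRET],
#   )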
class AmbiclimateFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize flow."""
self._registered_view = False
self._oauth = None
async def async_step_user(self, user_input=None):
"""Handle external yaml configuration."""
self._async_abort_entries_match()
config = self.hass.data.get(DATA_AMBICLIMATE_IMPL, {})
if not config:
_LOGGER.debug("No config")
return self.async_abort(reason="missing_configuration")
return await self.async_step_auth()
async def async_step_auth(self, user_input=None):
"""Handle a flow start."""
self._async_abort_entries_match()
errors = {}
if user_input is not None:
errors["base"] = "follow_link"
if not self._registered_view:
self._generate_view()
return self.async_show_form(
step_id="auth",
description_placeholders={
"authorization_url": await self._get_authorize_url(),
"cb_url": self._cb_url(),
},
errors=errors,
)
async def async_step_code(self, code=None):
"""Received code for authentication."""
self._async_abort_entries_match()
token_info = await self._get_token_info(code)
if token_info is None:
return self.async_abort(reason="access_token")
config = self.hass.data[DATA_AMBICLIMATE_IMPL].copy()
config["callback_url"] = self._cb_url()
return self.async_create_entry(title="Ambiclimate", data=config)
async def _get_token_info(self, code):
oauth = self._generate_oauth()
try:
token_info = await oauth.get_access_token(code)
except ambiclimate.AmbiclimateOauthError:
_LOGGER.error("Failed to get access token", exc_info=True)
return None
store = self.hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
await store.async_save(token_info)
return token_info
def _generate_view(self):
self.hass.http.register_view(AmbiclimateAuthCallbackView())
self._registered_view = True
def _generate_oauth(self):
config = self.hass.data[DATA_AMBICLIMATE_IMPL]
clientsession = async_get_clientsession(self.hass)
callback_url = self._cb_url()
return ambiclimate.AmbiclimateOAuth(
config.get(CONF_CLIENT_ID),
config.get(CONF_CLIENT_SECRET),
callback_url,
clientsession,
)
def _cb_url(self):
return f"{get_url(self.hass)}{AUTH_CALLBACK_PATH}"
async def _get_authorize_url(self):
oauth = self._generate_oauth()
return oauth.get_authorize_url()
class AmbiclimateAuthCallbackView(HomeAssistantView):
"""Ambiclimate Authorization Callback View."""
requires_auth = False
url = AUTH_CALLBACK_PATH
name = AUTH_CALLBACK_NAME
async def get(self, request: web.Request) -> str:
"""Receive authorization token."""
# pylint: disable=no-self-use
code = request.query.get("code")
if code is None:
return "No code"
hass = request.app["hass"]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": "code"}, data=code
)
)
return "OK!"
|
the-stack_106_21347
|
from __future__ import annotations
import re
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple
import structlog
from attr import attrs
from devtools import debug
from flask_sqlalchemy import SQLAlchemy
from ldif import LDIFParser
from tqdm import tqdm
from labster.di import injector
from labster.domain2.model.profile import Profile, ProfileRepository
from labster.domain2.model.structure import Structure, StructureRepository
from labster.ldap.constants import ADMINS_DN, PRESIDENCE_DN, SU_DN, \
get_parent_dn
logger = structlog.get_logger()
profile_repo = injector.get(ProfileRepository)
structure_repo = injector.get(StructureRepository)
db = injector.get(SQLAlchemy)
def parse_ldif_file(ldif_file: str) -> List[Tuple[str, Dict[str, Any]]]:
logger.info(f"### Parsing LDIF file {ldif_file}")
    ldif_fd = BytesIO()
    with open(ldif_file, "rb") as orig_ldif_fd:
        for line in orig_ldif_fd:
            if line.startswith(b"# search result"):
                break
            ldif_fd.write(line)
    ldif_fd.seek(0)
parser = LDIFParser(ldif_fd)
return list(parser.parse())
@attrs(auto_attribs=True)
class LdifRecord:
raw: Dict[str, List[str]]
def __getattr__(self, name):
return self.raw.get(name, [""])[0]
@property
def uid(self) -> Optional[str]:
if "uid" not in self.raw:
return None
return self.raw["uid"][0]
@property
def affectation(self) -> Optional[str]:
        structure_affectation = self._get_structure_d_affectation()
        if not structure_affectation:
            return None
        affectation = structure_affectation.dn
if affectation in ADMINS_DN:
affectation = get_parent_dn(affectation)
if affectation == PRESIDENCE_DN:
affectation = SU_DN
return affectation
def _get_structure_d_affectation(self) -> Optional[Structure]:
structure_d_affectation = None
affectation_principale = self.supannEntiteAffectationPrincipale
if affectation_principale:
structure_d_affectation = (
db.session.query(Structure)
.filter(Structure.supann_code_entite == affectation_principale)
.first()
)
if structure_d_affectation:
return structure_d_affectation
# Old LDIF format
affectation = self.sorbonneUniversiteEmpAffectation
if affectation:
structure_d_affectation = (
db.session.query(Structure).filter(Structure.dn == affectation).first()
)
if structure_d_affectation:
return structure_d_affectation
return None
@property
def fonctions(self):
return self.raw.get("eduPersonAffiliation", [])
@property
def address(self):
adresse = self.raw.get("postalAddress", [""])[0]
adresse = adresse.replace("$", "\n")
adresse = re.sub("\n\n+", "\n\n", adresse)
adresse = adresse.strip()
return adresse
def update_users_from_records(records: List[Tuple[str, Dict[str, List[str]]]]):
profiles = profile_repo.get_all()
old_profile_uids = {
profile.uid for profile in profiles if profile.uid and profile.active
}
count0 = len(old_profile_uids)
print(f"old total: {count0:d}")
logger.info(f"old total: {count0:d}")
new_profile_uids = set()
for _dn, r in records:
if "uid" not in r:
continue
uid = r["uid"][0]
new_profile_uids.add(uid)
deleted_uids = old_profile_uids.difference(new_profile_uids)
deactivate_users(deleted_uids)
uids_to_profiles = {p.uid: p for p in profiles}
print("Updating profiles from LDIF dump")
for _dn, r in tqdm(records, disable=None):
record = LdifRecord(r)
if not record.uid:
continue
uid = record.uid
if not uid:
continue
if uid in uids_to_profiles:
profile = uids_to_profiles[uid]
else:
profile = Profile(uid=uid)
profile_repo.put(profile)
update_profile_from_record(profile, record)
def deactivate_users(deleted_uids):
logger.info("To be deactivated:", deleted_uids=deleted_uids)
for uid in tqdm(deleted_uids, disable=None):
user = profile_repo.get_by_uid(uid)
user.deactivate()
def update_profile_from_record(profile: Profile, record: LdifRecord):
assert profile
profile.nom = record.sn
profile.prenom = record.givenName
profile.uid = record.uid
profile.email = record.mail
profile.telephone = record.telephoneNumber
profile.adresse = record.address
profile.login = record.supannAliasLogin
affectation = record.affectation
if not affectation:
if profile.active:
profile.affectation = ""
profile.deactivate()
return
if not profile.active:
profile.activate()
if profile.affectation != affectation:
profile.affectation = affectation
fonctions = list(record.fonctions)
if set(profile.fonctions) != set(fonctions):
profile.fonctions = fonctions
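if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): "dump.ldif" is a hypothetical path to an LDAP export, and a
    # configured application context / SQLAlchemy session is assumed to exist.
    ldif_records = parse_ldif_file("dump.ldif")
    update_users_from_records(ldif_records)
    db.session.commit()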
|
the-stack_106_21349
|
#!/usr/bin/env python
# XDSIndexerSum.py
# Copyright (C) 2013 Diamond Light Source, Graeme Winter & Richard Gildea
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# 20th June 2013
#
# A reimplementation of the XDS indexer to work by first summing images before
# the spot finding for indexing. May or may not help...
from __future__ import absolute_import, division, print_function
import os
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import Debug
from xia2.Modules.Indexer.XDSIndexer import XDSIndexer
from xia2.Wrappers.XDS.Merge2cbf import Merge2cbf
# the class that we are extending
# odds and sods that are needed
# FIXME need to put in access here to Phil parameters to know how wide to make
# the summed images
class XDSIndexerSum(XDSIndexer):
"""An extension of XDSIndexer using all available images."""
def __init__(self):
super(XDSIndexerSum, self).__init__()
        # XDSIndexer.__init__ modifies this!
self._index_select_images = _index_select_images
# helper functions
def _index_select_images(self):
"""Select correct images based on image headers."""
# FIXME in here (i) sum the images defined from the existing class
# contents then (ii) change the template stored, the directory and
# the header contents to correspond to those new images. Finally make
# a note of these changes so we can correct XPARM file at the end.
assert min(self.get_matching_images()) == 1
# make a note so we can fix the XPARM.XDS file at the end
self._true_phi_width = self.get_header_item("phi_width")
params = PhilIndex.params.xds.merge2cbf
if params.data_range is None:
params.data_range = 1, len(self.get_matching_images())
m2c = Merge2cbf(params=params)
m2c.setup_from_image(self.get_image_name(1))
m2c.set_working_directory(
os.path.join(self.get_working_directory(), "summed_images")
)
os.mkdir(m2c.get_working_directory())
m2c.run()
# Is this safe to do?
self._setup_from_image(
os.path.join(m2c.get_working_directory(), "merge2cbf_averaged_0001.cbf")
)
phi_width = self.get_header_item("phi_width")
if phi_width == 0.0:
raise RuntimeError("cannot use still images")
# use five degrees for the background calculation
five_deg = int(round(5.0 / phi_width)) - 1
if five_deg < 5:
five_deg = 5
images = self.get_matching_images()
# characterise the images - are there just two (e.g. dna-style
# reference images) or is there a full block? if it is the
# former then we have a problem, as we want *all* the images in the
# sweep...
wedges = []
        min_images = PhilIndex.params.xia2.settings.input.min_images
if len(images) < 3 and len(images) < min_images:
raise RuntimeError(
"This INDEXER cannot be used for only %d images" % len(images)
)
Debug.write("Adding images for indexer: %d -> %d" % (min(images), max(images)))
wedges.append((min(images), max(images)))
# FIXME this should have a wrapper function!
if min(images) + five_deg in images:
self._background_images = (min(images), min(images) + five_deg)
else:
self._background_images = (min(images), max(images))
return wedges
# FIXME here override _index_finish by calling original _index_finish
# then correcting the XPARM file as mentioned above.
def _index_finish(self):
self._modify_xparm_xds()
XDSIndexer._index_finish(self)
def _modify_xparm_xds(self):
import fileinput
xparm_filename = os.path.join(self.get_working_directory(), "XPARM.XDS")
assert os.path.isfile(xparm_filename)
        f = fileinput.input(xparm_filename, mode="r", inplace=1)
updated_oscillation_range = False
for line in f:
if not updated_oscillation_range:
# Starting image number (STARTING_FRAME=),
# spindle angle at start (STARTING_ANGLE=),
# oscillation range,
# and laboratory coordinates of the rotation axis.
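                # For illustration, with purely hypothetical values, the target
                # line looks like:
                #     1 0.0000 0.5000 0.000000 1.000000 0.000000
                # i.e. frame, angle, oscillation width, then the rotation axis;
                # only the oscillation width (tokens[2]) is rewritten below.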
tokens = line.split()
if len(tokens) == 6:
summed_oscillation_range = float(tokens[2])
# sanity check - is this actually necessary?
assert (
summed_oscillation_range - self.get_header_item("phi_width")
) < 1e-6
tokens[2] = "%.4f" % self._true_phi_width
print(" ".join(tokens))
continue
print(line, end=" ")
f.close()
# copy across file contents internally
self._data_files["XPARM.XDS"] = open(xparm_filename, mode="rb").read()
|
the-stack_106_21351
|
# -*- coding: utf-8 -*-
import numpy as np
import welltestpy as wtp
import seaborn as sns
import pandas as pd
from anaflow import thiem
from ogs5py import specialrange
from matplotlib import pyplot as plt
#from gstools import SRF, Gaussian
from project import get_srf, ogs_2d, priors
from scipy.optimize import curve_fit
from pathlib import Path
# discretization and parameters
time = specialrange(0, 7200, 50, typ="cub")
rad = specialrange(0, 1000, 100, typ="cub")
angles = 32
#storage = 1e-3
T_const = 1e-5
rate = -1e-3
well_pos = rad[[0, 6, 11, 16, 21, 31, 41, 51, 61, 71, 81]]
pwell_pos = np.array([well_pos[0], 0.0])
owell_pos = well_pos[1:]
def run_ogs_project(model):
model.write_input()
success = model.run_model()
return success
def import_ogs_results(model, owell_name = "owell"):
point = model.readtec_point(pcs="GROUNDWATER_FLOW")
time = point[owell_name]["TIME"]
head = point[owell_name]["HEAD"]
return head, time
if __name__ == '__main__':
owell_pos = [[1, 0], [3, 2], [5, 2], [0, -10], [-30, 0], [0, 50], [100, 0], [0, -250], [-500, 0]]
owell_pos = np.array(owell_pos)
field = wtp.FieldSite(name="Pump test data worth", coordinates=[51.353839, 12.431385])
campaign = wtp.Campaign(name="Transient-multi", fieldsite=field)
campaign.add_well(name="pwell", radius=0.1, coordinates=(0.0, 0.0))
campaign.add_well(name="owell_0", radius=0.1, coordinates=(1.0, 0.0))
campaign.add_well(name="owell_1", radius=0.1, coordinates=(3.0, 2.0))
campaign.add_well(name="owell_2", radius=0.1, coordinates=(5.0, 2.0))
campaign.add_well(name="owell_3", radius=0.1, coordinates=(0, -10.0))
campaign.add_well(name="owell_4", radius=0.1, coordinates=(-30, 0.0))
campaign.add_well(name="owell_5", radius=0.1, coordinates=(0, 50.0))
campaign.add_well(name="owell_6", radius=0.1, coordinates=(100, 0.0))
campaign.add_well(name="owell_7", radius=0.1, coordinates=(0, -250))
campaign.add_well(name="owell_8", radius=0.1, coordinates=(-500, 0))
# campaign.plot_wells()
model = ogs_2d.init_ogs_project(rad, task_root="pump_2d_trans")
T_field = get_srf.get_srf_2d(model,
mean = -5, var = 1.1, len_scale = 100, seed = 1)
model = ogs_2d.write_ogs_project(model, owell_pos, T_field, time=time)
    success = run_ogs_project(model)
    results = [
        import_ogs_results(model, owell_name=f"owell_{i}")
        for i in range(len(owell_pos))
    ]
pumptest = wtp.PumpingTest(
name="pwell",
pumpingwell="pwell",
pumpingrate=rate,
description="Virtual transient 2d pumping test",
)
pumptest.add_transient_obs("owell_0", time_0, head_0)
pumptest.add_transient_obs("owell_1", time_1, head_1)
pumptest.add_transient_obs("owell_2", time_2, head_2)
pumptest.add_transient_obs("owell_3", time_3, head_3)
pumptest.add_transient_obs("owell_4", time_4, head_4)
pumptest.add_transient_obs("owell_5", time_5, head_5)
pumptest.add_transient_obs("owell_6", time_6, head_6)
pumptest.add_transient_obs("owell_7", time_7, head_7)
pumptest.add_transient_obs("owell_8", time_8, head_8)
campaign.addtests(pumptest)
campaign.save(path = "../data/")
    for head, time_obs in results:
        plt.plot(time_obs, head)
plt.show()
# print(head_0)
|
the-stack_106_21357
|
"""
Work with references in the database
Schema:
- _id (str) the instance-unique reference ID
- cloned_from (Object) describes the reference this one was cloned from (can be null)
- id (str) the ID of the source reference
- name (str) the name of the source reference at the time of cloning
- created_at (datetime) when the reference was created
- data_type (Enum["genome", "barcode"]) the type of data stored in the reference
- description (str) a user-defined description for the reference
- groups (List[Object]) describes groups assigned to the reference and their rights
- id (str) the group ID
- build (bool) the group can create new builds of the reference
- modify (bool) the group can modify the non-OTU reference data
- modify_otu (bool) the group can modify OTUs
- remove (bool) the group can remove the reference
- users (List[Object]) describes users assigned to the reference and their rights
- id (str) the user ID
- build (bool) the user can create new builds of the reference
- modify (bool) the user can modify the non-OTU reference data
- modify_otu (bool) the user can modify OTUs
- remove (bool) the user can remove the reference
- internal_control (str) the ID for an OTU that is used as an internal control in the lab
- name (str) the reference name
- organism (str) the organism represented in the reference (eg. virus, bacteria, fungus)
- task (Object) a task associated with a current reference operation
- id (str) the task ID
- release (Object) describes the latest remote reference release
- body (str) the Markdown-formatted release body from GitHub
- content_type (str) release content type - should always be application/gzip
  - download_url (str) the GitHub URL at which the reference release can be downloaded
- etag (str) the release ETag - allows caching of the release check result
- filename (str) the name of the release file
- html_url (str) the URL to the web page for the release on GitHub
- id (str) the unique ID for the release from GitHub
- name (str) the name of the release (eg. v1.4.0)
- newer (bool) true if there is a newer release available
- published_at (datetime) when the release was published on GitHub
  - retrieved_at (datetime) when the release was retrieved from GitHub
- size (int) size of the release file in bytes
- remotes_from (Object) describes where the reference remotes from (can be null)
- errors (Array) errors related to the remote reference
- slug (str) the GitHub repository slug for the reference
- restrict_source_types (bool) restricts the source types users may use when creating isolates
- source_types (Array[str]) a set of allowable source types
- updates (Array[Object]) a history of updates applied to the remote reference
- SHARES FIELDS WITH release
- user (Object) describes the user that applied the update
- id (str) the user ID
- updating (bool) the remote reference is being updated
- user (Object) describes the creating user
- id (str) the user ID
"""
import asyncio
import datetime
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pymongo
import virtool.db.utils
import virtool.errors
import virtool.github
import virtool.history.db
import virtool.tasks.pg
import virtool.utils
from aiohttp import ClientConnectorError
from aiohttp.web import Request
from semver import VersionInfo
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from virtool.http.utils import download_file
from virtool.otus.db import join
from virtool.otus.utils import verify
from virtool.pg.utils import get_row
from virtool.references.utils import (
RIGHTS,
check_will_change,
clean_export_list,
get_owner_user,
load_reference_file,
)
from virtool.settings.db import Settings
from virtool.types import App
from virtool.uploads.models import Upload
from virtool.users.db import attach_user, extend_user
PROJECTION = [
"_id",
"remotes_from",
"cloned_from",
"created_at",
"data_type",
"imported_from",
"installed",
"internal_control",
"latest_build",
"name",
"organism",
"release",
"remotes_from",
"task",
"unbuilt_count",
"updates",
"updating",
"user",
"users",
"groups",
]
async def processor(db, document: dict) -> dict:
"""
Process a reference document to a form that can be dispatched or returned in a list.
    Use `attach_computed` for complete representations of the reference.
:param db: the application database client
:param document: the document to process
:return: the processed document
"""
try:
ref_id = document.pop("_id")
except KeyError:
ref_id = document["id"]
latest_build, otu_count, unbuilt_count = await asyncio.gather(
get_latest_build(db, ref_id),
get_otu_count(db, ref_id),
get_unbuilt_count(db, ref_id),
)
document.update(
{
"latest_build": latest_build,
"otu_count": otu_count,
"unbuilt_change_count": unbuilt_count,
}
)
try:
document["installed"] = document.pop("updates")[-1]
except (KeyError, IndexError):
pass
document["id"] = ref_id
return await attach_user(db, document)
async def attach_computed(db, document: dict) -> dict:
"""
Get all computed data for the specified reference and attach it to the passed `document`.
:param db: the application database client
:param document: the document to attached computed data to
:return: the updated document
"""
ref_id = document["_id"]
try:
internal_control_id = document["internal_control"]["id"]
except (KeyError, TypeError):
internal_control_id = None
(
contributors,
internal_control,
latest_build,
otu_count,
users,
unbuilt_count,
) = await asyncio.gather(
get_contributors(db, ref_id),
get_internal_control(db, internal_control_id, ref_id),
get_latest_build(db, ref_id),
get_otu_count(db, ref_id),
get_reference_users(db, document),
get_unbuilt_count(db, ref_id),
)
processed = virtool.utils.base_processor(
{
**document,
"contributors": contributors,
"internal_control": internal_control or None,
"latest_build": latest_build,
"otu_count": otu_count,
"unbuilt_change_count": unbuilt_count,
"users": users,
}
)
return await attach_user(db, processed)
async def get_reference_users(db, document: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Get a detailed list of users that have access to the specified reference.
:param db: the application database client
:param document: the reference document
:return: a list of user data dictionaries
"""
if not document.get("users"):
return []
return await asyncio.gather(*[extend_user(db, user) for user in document["users"]])
async def add_group_or_user(db, ref_id: str, field: str, data: dict) -> Optional[dict]:
document = await db.references.find_one({"_id": ref_id}, [field])
if not document:
return None
subdocument_id = data.get("group_id") or data["user_id"]
if (
field == "groups"
and await db.groups.count_documents({"_id": subdocument_id}) == 0
):
raise virtool.errors.DatabaseError("group does not exist")
if (
field == "users"
and await db.users.count_documents({"_id": subdocument_id}) == 0
):
raise virtool.errors.DatabaseError("user does not exist")
if subdocument_id in [s["id"] for s in document[field]]:
raise virtool.errors.DatabaseError(field[:-1] + " already exists")
rights = {key: data.get(key, False) for key in RIGHTS}
subdocument = {
"id": subdocument_id,
"created_at": virtool.utils.timestamp(),
**rights,
}
await db.references.update_one({"_id": ref_id}, {"$push": {field: subdocument}})
return subdocument
async def check_right(req: Request, reference: dict, right: str) -> bool:
if req["client"].administrator:
return True
user_id = req["client"].user_id
try:
groups = reference["groups"]
users = reference["users"]
except (KeyError, TypeError):
reference = await req.app["db"].references.find_one(
reference, ["groups", "users"]
)
groups = reference["groups"]
users = reference["users"]
for user in users:
if user["id"] == user_id:
if user[right]:
return True
break
for group in groups:
if group[right] and group["id"] in req["client"].groups:
return True
return False
async def check_source_type(db, ref_id: str, source_type: str) -> bool:
"""
Check if the provided `source_type` is valid based on the current reference source type configuration.
:param db: the application database client
:param ref_id: the reference context
:param source_type: the source type to check
:return: source type is valid
"""
document = await db.references.find_one(
ref_id, ["restrict_source_types", "source_types"]
)
restrict_source_types = document.get("restrict_source_types", False)
source_types = document.get("source_types", list())
if source_type == "unknown":
return True
# Return `False` when source_types are restricted and source_type is not allowed.
if source_type and restrict_source_types:
return source_type in source_types
# Return `True` when:
# - source_type is empty string (unknown)
# - source_types are not restricted
# - source_type is an allowed source_type
return True
def compose_base_find_query(user_id: str, administrator: bool, groups: list) -> dict:
"""
Compose a query for filtering reference search results based on user read rights.
:param user_id: the id of the user requesting the search
:param administrator: the administrator flag of the user requesting the search
:param groups: the id group membership of the user requesting the search
:return: a valid MongoDB query
"""
if administrator:
return dict()
is_user_member = {"users.id": user_id}
is_group_member = {"groups.id": {"$in": groups}}
is_owner = {"user.id": user_id}
return {"$or": [is_group_member, is_user_member, is_owner]}
async def delete_group_or_user(
db, ref_id: str, subdocument_id: str, field: str
) -> Optional[str]:
"""
Delete an existing group or user as decided by the `field` argument.
:param db: the application database client
:param ref_id: the id of the reference to modify
:param subdocument_id: the id of the group or user to delete
:param field: the field to modify: 'group' or 'user'
:return: the id of the removed subdocument
"""
document = await db.references.find_one(
{"_id": ref_id, field + ".id": subdocument_id}, [field]
)
if document is None:
return None
# Retain only the subdocuments that don't match the passed `subdocument_id`.
filtered = [s for s in document[field] if s["id"] != subdocument_id]
await db.references.update_one({"_id": ref_id}, {"$set": {field: filtered}})
return subdocument_id
async def edit_group_or_user(
db, ref_id: str, subdocument_id: str, field: str, data: dict
) -> Optional[dict]:
"""
Edit an existing group or user as decided by the `field` argument. Returns `None` if the reference, group, or user
does not exist.
:param db: the application database client
:param ref_id: the id of the reference to modify
:param subdocument_id: the id of the group or user to modify
:param field: the field to modify: 'group' or 'user'
:param data: the data to update the group or user with
:return: the modified subdocument
"""
document = await db.references.find_one(
{"_id": ref_id, field + ".id": subdocument_id}, [field]
)
if document is None:
return None
for subdocument in document[field]:
if subdocument["id"] == subdocument_id:
rights = {key: data.get(key, subdocument[key]) for key in RIGHTS}
subdocument.update(rights)
await db.references.update_one(
{"_id": ref_id}, {"$set": {field: document[field]}}
)
return subdocument
async def fetch_and_update_release(
app, ref_id: str, ignore_errors: bool = False
) -> dict:
"""
Get the latest release for the GitHub repository identified by the passed `slug`. If a release is found, update the
reference identified by the passed `ref_id` and return the release.
Exceptions can be ignored during the GitHub request. Error information will still be written to the reference
document.
:param app: the application object
:param ref_id: the id of the reference to update
:param ignore_errors: ignore exceptions raised during GitHub request
:return: the latest release
"""
db = app["db"]
retrieved_at = virtool.utils.timestamp()
document = await db.references.find_one(
ref_id, ["installed", "release", "remotes_from"]
)
release = document.get("release")
etag = virtool.github.get_etag(release)
# Variables that will be used when trying to fetch release from GitHub.
errors = list()
updated = None
try:
updated = await virtool.github.get_release(
app["config"], app["client"], document["remotes_from"]["slug"], etag
)
if updated:
updated = virtool.github.format_release(updated)
except (ClientConnectorError, virtool.errors.GitHubError) as err:
if "ClientConnectorError" in str(err):
errors = ["Could not reach GitHub"]
if "404" in str(err):
errors = ["GitHub repository or release does not exist"]
if errors and not ignore_errors:
raise
if updated:
release = updated
if release:
installed = document["installed"]
release["newer"] = bool(
installed
and VersionInfo.parse(release["name"].lstrip("v"))
> VersionInfo.parse(installed["name"].lstrip("v"))
)
release["retrieved_at"] = retrieved_at
await db.references.update_one(
{"_id": ref_id}, {"$set": {"errors": errors, "release": release}}
)
return release
async def get_contributors(db, ref_id: str) -> Optional[List[dict]]:
"""
    Return a list of contributors and their contribution count for a specific ref.
:param db: the application database client
:param ref_id: the id of the ref to get contributors for
:return: a list of contributors to the ref
"""
return await virtool.history.db.get_contributors(db, {"reference.id": ref_id})
async def get_internal_control(
db, internal_control_id: Optional[str], ref_id: str
) -> Optional[dict]:
"""
    Return a minimal dict describing the ref internal control given an `otu_id`.
:param db: the application database client
:param internal_control_id: the id of the otu to create a minimal dict for
:param ref_id: the id of the reference to look for the control OTU in
:return: a minimal dict describing the ref internal control
"""
if internal_control_id is None:
return None
name = await virtool.db.utils.get_one_field(
db.otus, "name", {"_id": internal_control_id, "reference.id": ref_id}
)
if name is None:
return None
return {"id": internal_control_id, "name": name}
async def get_latest_build(db, ref_id: str) -> Optional[dict]:
"""
Return the latest index build for the ref.
:param db: the application database client
:param ref_id: the id of the ref to get the latest build for
:return: a subset of fields for the latest build
"""
latest_build = await db.indexes.find_one(
{"reference.id": ref_id, "ready": True},
projection=["created_at", "version", "user", "has_json"],
sort=[("version", pymongo.DESCENDING)],
)
if latest_build is None:
return None
return virtool.utils.base_processor(latest_build)
async def get_official_installed(db) -> bool:
"""
Return a boolean indicating whether the official plant virus reference is installed.
:param db:
:return: official reference install status
"""
return (
await db.references.count_documents(
{"remotes_from.slug": "virtool/ref-plant-viruses"}
)
> 0
)
async def get_manifest(db, ref_id: str) -> dict:
"""
Generate a dict of otu document version numbers keyed by the document id. This is used to make sure only changes
made at the time the index rebuild was started are included in the build.
:param db: the application database client
:param ref_id: the id of the reference to get the current index for
:return: a manifest of otu ids and versions
"""
manifest = dict()
async for document in db.otus.find({"reference.id": ref_id}, ["version"]):
manifest[document["_id"]] = document["version"]
return manifest
async def get_otu_count(db, ref_id: str) -> int:
"""
Get the number of OTUs associated with the given `ref_id`.
:param db: the application database client
:param ref_id: the id of the reference to get the current index for
:return: the OTU count
"""
return await db.otus.count_documents({"reference.id": ref_id})
async def get_unbuilt_count(db, ref_id: str) -> int:
"""
Return a count of unbuilt history changes associated with a given `ref_id`.
:param db: the application database client
:param ref_id: the id of the ref to count unbuilt changes for
:return: the number of unbuilt changes
"""
return await db.history.count_documents(
{"reference.id": ref_id, "index.id": "unbuilt"}
)
async def create_clone(
db, settings: Settings, name: str, clone_from: str, description: str, user_id: str
) -> dict:
source = await db.references.find_one(clone_from)
name = name or "Clone of " + source["name"]
document = await create_document(
db,
settings,
name,
source["organism"],
description,
source["data_type"],
created_at=virtool.utils.timestamp(),
user_id=user_id,
)
document["cloned_from"] = {"id": clone_from, "name": source["name"]}
return document
async def create_document(
db,
settings: Settings,
name: str,
organism: Optional[str],
description: str,
data_type: Optional[str],
created_at=None,
ref_id: Optional[str] = None,
user_id: Optional[str] = None,
users=None,
):
if ref_id and await db.references.count_documents({"_id": ref_id}):
raise virtool.errors.DatabaseError("ref_id already exists")
ref_id = ref_id or await virtool.db.utils.get_new_id(db.otus)
user = None
if user_id:
user = {"id": user_id}
if not users:
users = [get_owner_user(user_id)]
document = {
"_id": ref_id,
"created_at": created_at or virtool.utils.timestamp(),
"data_type": data_type,
"description": description,
"name": name,
"organism": organism,
"internal_control": None,
"restrict_source_types": False,
"source_types": settings.default_source_types,
"groups": list(),
"users": users,
"user": user,
}
if data_type == "barcode":
document["targets"] = list()
return document
async def create_import(
db,
pg: AsyncEngine,
settings: Settings,
name: str,
description: str,
import_from: str,
user_id: str,
) -> dict:
"""
Import a previously exported Virtool reference.
:param db: the application database client
:param pg: PostgreSQL database object
:param settings: the application settings object
:param name: the name for the new reference
:param description: a description for the new reference
:param import_from: the uploaded file to import from
:param user_id: the id of the creating user
:return: a reference document
"""
created_at = virtool.utils.timestamp()
document = await create_document(
        db,
settings,
name or "Unnamed Import",
None,
description,
None,
created_at=created_at,
user_id=user_id,
)
upload = await get_row(pg, Upload, ("name_on_disk", import_from))
document["imported_from"] = upload.to_dict()
return document
async def create_remote(
db, settings: Settings, release: dict, remote_from: str, user_id: str
) -> dict:
"""
Create a remote reference document in the database.
:param db: the application database object
:param settings: the application settings
:param release: the latest release for the remote reference
:param remote_from: information about the remote (errors, GitHub slug)
:param user_id: the id of the requesting user
:return: the new reference document
"""
created_at = virtool.utils.timestamp()
document = await create_document(
db,
settings,
"Plant Viruses",
None,
"The official plant virus reference from the Virtool developers",
None,
created_at=created_at,
user_id=user_id,
)
return {
**document,
# Connection information for the GitHub remote repo.
"remotes_from": {"errors": [], "slug": remote_from},
# The latest available release on GitHub.
"release": dict(release, retrieved_at=created_at),
# The update history for the reference. We put the release being installed as the first history item.
"updates": [
virtool.github.create_update_subdocument(
release, False, user_id, created_at
)
],
"installed": None,
}
async def download_and_parse_release(
app, url: str, task_id: int, progress_handler: callable
):
pg = app["pg"]
with virtool.utils.get_temp_dir() as tempdir:
download_path = Path(tempdir) / "reference.tar.gz"
await download_file(app, url, download_path, progress_handler)
await virtool.tasks.pg.update(pg, task_id, step="unpack")
return await app["run_in_thread"](load_reference_file, download_path)
async def edit(db, ref_id: str, data: dict) -> dict:
"""
Edit and existing reference using the passed update `data`.
:param db: the application database object
:param ref_id: the id of the reference to update
:param data: update data from the HTTP request
:return: the updated reference document
"""
document = await db.references.find_one(ref_id)
if document["data_type"] != "barcode":
data.pop("targets", None)
document = await db.references.find_one_and_update({"_id": ref_id}, {"$set": data})
document = await attach_computed(db, document)
if "name" in data:
await db.analyses.update_many(
{"reference.id": ref_id}, {"$set": {"reference.name": document["name"]}}
)
return document
async def export(app: App, ref_id: str) -> list:
db = app["db"]
otu_list = list()
query = {"reference.id": ref_id, "last_indexed_version": {"$ne": None}}
async for document in db.otus.find(query):
_, joined, _ = await virtool.history.db.patch_to_version(
app, document["_id"], document["last_indexed_version"]
)
otu_list.append(joined)
return clean_export_list(otu_list)
async def insert_change(
app, otu_id: str, verb: str, user_id: str, old: Optional[dict] = None
):
"""
Insert a history document for the OTU identified by `otu_id` and the passed `verb`.
:param app: the application object
:param otu_id: the ID of the OTU the change is for
:param verb: the change verb (eg. remove, insert)
:param user_id: the ID of the requesting user
:param old: the old joined OTU document
"""
db = app["db"]
# Join the otu document into a complete otu record. This will be used for recording history.
joined = await join(db, otu_id)
name = joined["name"]
e = "" if verb[-1] == "e" else "e"
# Build a ``description`` field for the otu creation change document.
description = f"{verb.capitalize()}{e}d {name}"
abbreviation = joined.get("abbreviation")
# Add the abbreviation to the description if there is one.
if abbreviation:
description = f"{description} ({abbreviation})"
await virtool.history.db.add(
app, verb, old, joined, description, user_id, silent=True
)
async def insert_joined_otu(
db,
otu: dict,
created_at: datetime.datetime,
ref_id: str,
user_id: str,
remote: bool = False,
) -> str:
all_sequences = list()
issues = verify(otu)
otu.update(
{
"created_at": created_at,
"lower_name": otu["name"].lower(),
"last_indexed_version": None,
"issues": issues,
"verified": issues is None,
"imported": True,
"version": 0,
"reference": {"id": ref_id},
"user": {"id": user_id},
}
)
if "schema" not in otu:
otu["schema"] = list()
remote_id = otu.pop("_id")
if remote:
otu["remote"] = {"id": remote_id}
for isolate in otu["isolates"]:
for sequence in isolate.pop("sequences"):
try:
remote_sequence_id = sequence["remote"]["id"]
sequence.pop("_id")
except KeyError:
remote_sequence_id = sequence.pop("_id")
all_sequences.append(
{
**sequence,
"accession": sequence["accession"],
"isolate_id": isolate["id"],
"segment": sequence.get("segment", ""),
"reference": {"id": ref_id},
"remote": {"id": remote_sequence_id},
}
)
document = await db.otus.insert_one(otu, silent=True)
for sequence in all_sequences:
await db.sequences.insert_one(
dict(sequence, otu_id=document["_id"]), silent=True
)
return document["_id"]
async def refresh_remotes(app: App):
db = app["db"]
try:
logging.debug("Started reference refresher")
while True:
for ref_id in await db.references.distinct(
"_id", {"remotes_from": {"$exists": True}}
):
await fetch_and_update_release(app, ref_id, ignore_errors=True)
await asyncio.sleep(600)
except asyncio.CancelledError:
pass
logging.debug("Stopped reference refresher")
async def update(
req: Request,
created_at: datetime.datetime,
task_id: int,
ref_id: str,
release: dict,
user_id: str,
) -> tuple:
db = req.app["db"]
update_subdocument = virtool.github.create_update_subdocument(
release, False, user_id, created_at
)
await db.references.update_one(
{"_id": ref_id},
{
"$push": {"updates": update_subdocument},
"$set": {"task": {"id": task_id}, "updating": True},
},
)
return release, update_subdocument
async def update_joined_otu(
db, otu: dict, created_at: datetime.datetime, ref_id: str, user_id: str
) -> Union[dict, str, None]:
remote_id = otu["_id"]
old = await join(db, {"reference.id": ref_id, "remote.id": remote_id})
if old:
if not check_will_change(old, otu):
return None
sequence_updates = list()
for isolate in otu["isolates"]:
for sequence in isolate.pop("sequences"):
sequence_updates.append(
{
"accession": sequence["accession"],
"definition": sequence["definition"],
"host": sequence["host"],
"segment": sequence.get("segment", ""),
"sequence": sequence["sequence"],
"otu_id": old["_id"],
"isolate_id": isolate["id"],
"reference": {"id": ref_id},
"remote": {"id": sequence["_id"]},
}
)
await db.otus.update_one(
{"_id": old["_id"]},
{
"$inc": {"version": 1},
"$set": {
"abbreviation": otu["abbreviation"],
"name": otu["name"],
"lower_name": otu["name"].lower(),
"isolates": otu["isolates"],
"schema": otu.get("schema", list()),
},
},
)
for sequence_update in sequence_updates:
remote_sequence_id = sequence_update["remote"]["id"]
update_result = await db.sequences.update_one(
{"reference.id": ref_id, "remote.id": remote_sequence_id},
{"$set": sequence_update},
)
if not update_result.matched_count:
await db.sequences.insert_one(sequence_update)
return old
return await insert_joined_otu(db, otu, created_at, ref_id, user_id, remote=True)
|
the-stack_106_21360
|
"""
License: MIT <http://brianmhunt.mit-license.org/>
"""
import sys
#sys.path.append('/home/tobias/data/git/wodore-gae/main/')
#sys.path.append('./')
sys.path.append('./model')
import logging
import unittest
from google.appengine.ext import ndb#, testbed
from counter import CountableLazy
class TestCountLazyModel(CountableLazy, ndb.Model):
"""This is a test class for trying out counters
"""
name = ndb.StringProperty()
class TestCountLazyModelExtended(CountableLazy, ndb.Model):
"""This is a test class for trying out counters
"""
name = ndb.StringProperty()
toplevel = ndb.KeyProperty()
collection = ndb.StringProperty(required=True, indexed=True,
default='global', validator=lambda p, v: v.lower())
class TestTags(unittest.TestCase):
# enable the datastore stub
nosegae_datastore_v3 = True
nosegae_memcache = True
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
tclm = TestCountLazyModel(name="X")
tclm.put()
assert tclm is not None
self.assertEqual(tclm.count, 0)
def test_counter_incr(self):
tclm = TestCountLazyModel(name="X")
tclm.incr()
self.assertEqual(tclm.count, 1)
tclm.incr(2)
self.assertEqual(tclm.count, 3)
tclm.put()
self.assertEqual(tclm.count, 3)
def test_counter_decr(self):
tclm = TestCountLazyModel(name="X")
tclm.decr()
self.assertEqual(tclm.count, -1)
tclm.decr(2)
self.assertEqual(tclm.count, -3)
tclm.put()
def test_counter_saved(self):
tclm = TestCountLazyModel(name="X")
tclm.incr()
key = tclm.put()
self.assertEqual(str(key),"Key('TestCountLazyModel', 1)")
tclm2 = key.get()
self.assertEqual(tclm2.count, 1)
def test_counter_incr_with_toplevel(self):
top = TestCountLazyModelExtended(name="top")
top_key = top.put()
mid = TestCountLazyModelExtended(name="mid",
toplevel = top_key,
collection = 'mid')
mid_key = mid.put()
bot = TestCountLazyModelExtended(name="bot",
toplevel = mid_key,
collection = 'bot')
bot_key = bot.put()
self.assertEqual(top.count, 0)
self.assertEqual(mid.count, 0)
self.assertEqual(bot.count, 0)
bot.incr()
bot.put()
self.assertEqual(top.count, 1)
self.assertEqual(mid.count, 1)
self.assertEqual(bot.count, 1)
mid.incr()
mid.put()
self.assertEqual(top.count, 2)
self.assertEqual(mid.count, 2)
self.assertEqual(bot.count, 1)
top.incr()
top.put()
self.assertEqual(top.count, 3)
self.assertEqual(mid.count, 2)
self.assertEqual(bot.count, 1)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_21362
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import datetime
from collections import deque
def set_paddle_flags(**kwargs):
for key, value in kwargs.items():
if os.environ.get(key, None) is None:
os.environ[key] = str(value)
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
from paddle import fluid
from ppdet.experimental import mixed_precision_context
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.data.data_feed import create_reader
from ppdet.utils.cli import print_total_cfg
from ppdet.utils import dist_utils
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
from ppdet.utils.stats import TrainingStats
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu, check_version
import ppdet.utils.checkpoint as checkpoint
from ppdet.modeling.model_input import create_feed
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def main():
env = os.environ
FLAGS.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
if FLAGS.dist:
trainer_id = int(env['PADDLE_TRAINER_ID'])
import random
local_seed = (99 + trainer_id)
random.seed(local_seed)
np.random.seed(local_seed)
cfg = load_config(FLAGS.config)
if 'architecture' in cfg:
main_arch = cfg.architecture
else:
raise ValueError("'architecture' not specified in config file.")
merge_config(FLAGS.opt)
if 'log_iter' not in cfg:
cfg.log_iter = 20
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
if not FLAGS.dist or trainer_id == 0:
print_total_cfg(cfg)
if cfg.use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(os.environ.get('CPU_NUM', 1))
if 'train_feed' not in cfg:
train_feed = create(main_arch + 'TrainFeed')
else:
train_feed = create(cfg.train_feed)
if FLAGS.eval:
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
eval_feed = create(cfg.eval_feed)
if 'FLAGS_selected_gpus' in env:
device_id = int(env['FLAGS_selected_gpus'])
else:
device_id = 0
place = fluid.CUDAPlace(device_id) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
lr_builder = create('LearningRate')
optim_builder = create('OptimizerBuilder')
# build program
startup_prog = fluid.Program()
train_prog = fluid.Program()
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
model = create(main_arch)
train_loader, feed_vars = create_feed(train_feed)
if FLAGS.fp16:
assert (getattr(model.backbone, 'norm_type', None)
!= 'affine_channel'), \
'--fp16 currently does not support affine channel, ' \
' please modify backbone settings to use batch norm'
with mixed_precision_context(FLAGS.loss_scale, FLAGS.fp16) as ctx:
train_fetches = model.train(feed_vars)
loss = train_fetches['loss']
if FLAGS.fp16:
loss *= ctx.get_loss_scale_var()
lr = lr_builder()
optimizer = optim_builder(lr)
optimizer.minimize(loss)
if FLAGS.fp16:
loss /= ctx.get_loss_scale_var()
# parse train fetches
train_keys, train_values, _ = parse_fetches(train_fetches)
train_values.append(lr)
if FLAGS.eval:
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
model = create(main_arch)
eval_loader, feed_vars = create_feed(eval_feed)
fetches = model.eval(feed_vars)
eval_prog = eval_prog.clone(True)
eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
eval_loader.set_sample_list_generator(eval_reader, place)
# parse eval fetches
extra_keys = []
if cfg.metric == 'COCO':
extra_keys = ['im_info', 'im_id', 'im_shape']
if cfg.metric == 'VOC':
extra_keys = ['gt_box', 'gt_label', 'is_difficult']
if cfg.metric == 'WIDERFACE':
extra_keys = ['im_id', 'im_shape', 'gt_box']
eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,
extra_keys)
# compile program for multi-devices
build_strategy = fluid.BuildStrategy()
build_strategy.fuse_all_optimizer_ops = False
build_strategy.fuse_elewise_add_act_ops = True
# only enable sync_bn in multi GPU devices
sync_bn = getattr(model.backbone, 'norm_type', None) == 'sync_bn'
build_strategy.sync_batch_norm = sync_bn and devices_num > 1 \
and cfg.use_gpu
exec_strategy = fluid.ExecutionStrategy()
# iteration number when CompiledProgram tries to drop local execution scopes.
    # Set it to 1 to save memory usage, so that unused variables in
# local execution scopes can be deleted after each iteration.
exec_strategy.num_iteration_per_drop_scope = 1
if FLAGS.dist:
dist_utils.prepare_for_multi_process(exe, build_strategy, startup_prog,
train_prog)
exec_strategy.num_threads = 1
exe.run(startup_prog)
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
if FLAGS.eval:
compiled_eval_prog = fluid.compiler.CompiledProgram(eval_prog)
fuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'
ignore_params = cfg.finetune_exclude_pretrained_params \
if 'finetune_exclude_pretrained_params' in cfg else []
start_iter = 0
if FLAGS.resume_checkpoint:
checkpoint.load_checkpoint(exe, train_prog, FLAGS.resume_checkpoint)
start_iter = checkpoint.global_step()
elif cfg.pretrain_weights and fuse_bn and not ignore_params:
checkpoint.load_and_fusebn(exe, train_prog, cfg.pretrain_weights)
elif cfg.pretrain_weights:
checkpoint.load_params(
exe, train_prog, cfg.pretrain_weights, ignore_params=ignore_params)
train_reader = create_reader(train_feed, (cfg.max_iters - start_iter) *
devices_num, FLAGS.dataset_dir)
train_loader.set_sample_list_generator(train_reader, place)
# whether output bbox is normalized in model output layer
is_bbox_normalized = False
if hasattr(model, 'is_bbox_normalized') and \
callable(model.is_bbox_normalized):
is_bbox_normalized = model.is_bbox_normalized()
# if map_type not set, use default 11point, only use in VOC eval
map_type = cfg.map_type if 'map_type' in cfg else '11point'
train_stats = TrainingStats(cfg.log_smooth_window, train_keys)
train_loader.start()
start_time = time.time()
end_time = time.time()
cfg_name = os.path.basename(FLAGS.config).split('.')[0]
save_dir = os.path.join(cfg.save_dir, cfg_name)
time_stat = deque(maxlen=cfg.log_smooth_window)
best_box_ap_list = [0.0, 0] #[map, iter]
# use tb-paddle to log data
if FLAGS.use_tb:
from tb_paddle import SummaryWriter
tb_writer = SummaryWriter(FLAGS.tb_log_dir)
tb_loss_step = 0
tb_mAP_step = 0
for it in range(start_iter, cfg.max_iters):
start_time = end_time
end_time = time.time()
time_stat.append(end_time - start_time)
time_cost = np.mean(time_stat)
eta_sec = (cfg.max_iters - it) * time_cost
eta = str(datetime.timedelta(seconds=int(eta_sec)))
outs = exe.run(compiled_train_prog, fetch_list=train_values)
stats = {k: np.array(v).mean() for k, v in zip(train_keys, outs[:-1])}
# use tb-paddle to log loss
if FLAGS.use_tb:
if it % cfg.log_iter == 0:
for loss_name, loss_value in stats.items():
tb_writer.add_scalar(loss_name, loss_value, tb_loss_step)
tb_loss_step += 1
train_stats.update(stats)
logs = train_stats.log()
if it % cfg.log_iter == 0 and (not FLAGS.dist or trainer_id == 0):
strs = 'iter: {}, lr: {:.6f}, {}, time: {:.3f}, eta: {}'.format(
it, np.mean(outs[-1]), logs, time_cost, eta)
logger.info(strs)
if (it > 0 and it % cfg.snapshot_iter == 0 or it == cfg.max_iters - 1) \
and (not FLAGS.dist or trainer_id == 0):
save_name = str(it) if it != cfg.max_iters - 1 else "model_final"
checkpoint.save(exe, train_prog, os.path.join(save_dir, save_name))
if FLAGS.eval:
# evaluation
results = eval_run(exe, compiled_eval_prog, eval_loader,
eval_keys, eval_values, eval_cls)
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
box_ap_stats = eval_results(
results, eval_feed, cfg.metric, cfg.num_classes, resolution,
is_bbox_normalized, FLAGS.output_eval, map_type)
# use tb_paddle to log mAP
if FLAGS.use_tb:
tb_writer.add_scalar("mAP", box_ap_stats[0], tb_mAP_step)
tb_mAP_step += 1
if box_ap_stats[0] > best_box_ap_list[0]:
best_box_ap_list[0] = box_ap_stats[0]
best_box_ap_list[1] = it
checkpoint.save(exe, train_prog,
os.path.join(save_dir, "best_model"))
logger.info("Best test box ap: {}, in iter: {}".format(
best_box_ap_list[0], best_box_ap_list[1]))
train_loader.reset()
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"-r",
"--resume_checkpoint",
default=None,
type=str,
help="Checkpoint path for resuming training.")
parser.add_argument(
"--fp16",
action='store_true',
default=False,
help="Enable mixed precision training.")
parser.add_argument(
"--loss_scale",
default=8.,
type=float,
help="Mixed precision training loss scale.")
parser.add_argument(
"--eval",
action='store_true',
default=False,
help="Whether to perform evaluation in train")
parser.add_argument(
"--output_eval",
default=None,
type=str,
help="Evaluation directory, default is current directory.")
parser.add_argument(
"-d",
"--dataset_dir",
default=None,
type=str,
help="Dataset path, same as DataFeed.dataset.dataset_dir")
parser.add_argument(
"--use_tb",
type=bool,
default=False,
help="whether to record the data to Tensorboard.")
parser.add_argument(
'--tb_log_dir',
type=str,
default="tb_log_dir/scalar",
help='Tensorboard logging directory for scalar.')
FLAGS = parser.parse_args()
main()
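# Illustrative launch command (a sketch, not part of the original script): the
# script and config file names are assumptions, and it presumes ppdet's
# ArgsParser exposes the config path as -c/--config as in upstream
# PaddleDetection:
#
#     python train.py -c configs/yolov3_darknet.yml --eval --use_tb=True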
|
the-stack_106_21363
|
# -*- coding: utf-8 -*-
# @copyright: MIT License
# Copyright (c) 2018 syntactic (Pastรจque Ho)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# @summary: This file parses a JSGF Grammar and prints it out.
# @since: 2014/06/02
"""
This file parses a JSGF grammar file and returns a JSGFGrammar object. \
It uses the pyparsing module and defines a grammar for JSGF grammars. \
Upon finding a string or JSGF expression, it builds a grammar object from \
the bottom up, composing JSGF expressions with strings and lists. When the \
entire right hand side of a rule has been parsed and a JSGF expression \
object has been created of it, it gets added to the main JSGFGrammar \
object as one of its rules.
To run the parser independently and print the resulting JSGFGrammar object, \
run it as:
``python JSGFParser.py Ideas.gram``
Generally, this module should be imported and the ``getGrammarObject`` function should be called \
with a ``file`` object as its argument. This function returns a grammar \
object that can be used by the Generator scripts ``DeterministicGenerator.py`` \
and ``ProbabilisticGenerator.py``.
The features of JSGF that this parser can handle include:
- rulenames
- tokens
- comments
- rule definitions
- rule expansions
- sequences
- alternatives
- weights
- grouping
- optional grouping
Notable features of JSGF that are **not** handled by this parser are:
- grammar names
- import statements
- unary operators
- tags
"""
import sys
import JSGFGrammar as gram
from pyparsing import *
sys.setrecursionlimit(100000)
usePackrat = True
def foundWeight(s, loc, toks):
"""
PyParsing action to run when a weight is found.
:returns: Weight as a floating point number
"""
# print 'found weight', toks.dump()
# print 'returning the weight', float(toks.weightAmount)
return float(toks.weightAmount)
def foundToken(s, loc, toks):
"""
PyParsing action to run when a token is found.
:returns: Token as a string
"""
# print 'found token', toks.dump()
# print 'returning the token', toks.token
return toks.token
def foundNonterminal(s, loc, toks):
"""
PyParsing action to run when a nonterminal reference is found.
:returns: NonTerminal object representing the NT reference found
"""
return gram.NonTerminal(list(toks)[0])
def foundWeightedExpression(s, loc, toks):
"""
PyParsing action to run when a weighted expression is found.
:returns: Ordered pair of the expression and its weight
"""
toks.weightedExpression = (toks.expr, toks.weight)
# print 'found weighted expression', toks.dump()
expr = list(toks.expr)
if len(expr) == 1:
expr = expr[0]
pair = (expr, toks.weight)
# print 'returning', pair
return pair
def foundPair(s, loc, toks):
"""
PyParsing action to run when a pair of alternatives are found.
:returns: Disjunction object containing all disjuncts that have been accumulated so far
"""
# print 'found pair', toks.dump()
# print 'disj1 is', list(toks.disj1), 'disj2 is', list(toks.disj2)
firstAlternative = list(toks.disj1)
secondAlternative = list(toks.disj2)
if len(firstAlternative) > 1:
disj = [firstAlternative]
else:
disj = firstAlternative
if len(secondAlternative) == 1:
if isinstance(secondAlternative[0], gram.Disjunction):
# print 'found disjuncts in second alt', secondAlternative[0].disjuncts
disj.extend(secondAlternative[0].disjuncts)
else:
disj.append(secondAlternative[0])
else:
disj.append(secondAlternative)
disj = gram.Disjunction(disj)
# print 'returing the pair', disj
return disj
def foundOptionalGroup(s, loc, toks):
"""
PyParsing action to run when an optional group is found.
:returns: Optional object containing all elements in the group
"""
# print 'optional item is', toks.optionalItem
if len(list(toks[0])) > 1:
return gram.Optional(list(toks[0]))
else:
return gram.Optional(toks.optionalItem[0])
def foundSeq(s, loc, toks):
"""
PyParsing action to run when a sequence of concatenated elements is found.
:returns: List of JSGFGrammar objects, strings, or more lists
"""
# print 'seq is', toks.dump()
# print 'length of seq is', len(list(toks[0])), list(toks[0])
if len(list(toks[0])) > 1:
# print 'seq retringin', list(toks[0]), type(list(toks[0]))
return list(toks[0])
else:
# print 'seq returning', list(toks[0])[0], type(list(toks[0])[0])
return list(toks[0])[0]
# PyParsing rule for a weight
weight = (Literal('/').suppress() + (Word(nums + '.')).setResultsName('weightAmount') +
Literal('/').suppress()).setParseAction(foundWeight).setResultsName("weight")
# PyParsing rule for a token
token = Word(pyparsing_unicode.LatinA.alphas + alphanums +
             pyparsing_unicode.Latin1.alphas +
             pyparsing_unicode.LatinB.alphas +
             "ąóśłężźćń'_-,.?@").setResultsName('token').setParseAction(foundToken)
# PyParsing rule for a nonterminal reference
nonterminal = Combine(Literal('<') + Word(alphanums+'$_:;,=|/\\()[]@#%!^&~') +
Literal('>')).setParseAction(foundNonterminal).setResultsName('NonTerminal')
Sequence = Forward()
weightedExpression = (weight + Group(Sequence).setResultsName("expr")). \
setResultsName('weightedExpression').setParseAction(foundWeightedExpression)
weightAlternatives = Forward()
weightedPrime = Literal('|').suppress() + weightAlternatives
weightAlternatives << MatchFirst([(Group(weightedExpression).setResultsName("disj1") +
Group(weightedPrime).setResultsName("disj2")).
setParseAction(foundPair).setResultsName("pair"),
Group(weightedExpression).setParseAction(foundSeq)])
disj = Forward()
disjPrime = Literal('|').suppress() + disj
disj << MatchFirst([(Group(Sequence).setResultsName("disj1") + Group(disjPrime).
setResultsName("disj2")).setParseAction(foundPair).
setResultsName("pair"), Group(Sequence).setParseAction(foundSeq)])
topLevel = MatchFirst([disj, weightAlternatives])
StartSymbol = Optional(Literal('public')).setResultsName('public') + \
nonterminal.setResultsName('identifier') + \
Literal('=').suppress() + \
Group(topLevel).setResultsName('ruleDef') + Literal(';').suppress() + stringEnd
Expression = MatchFirst([nonterminal, token])
Grouping = Literal('(').suppress() + topLevel + Literal(')').suppress()
OptionalGrouping = (Literal('[').suppress() + Group(topLevel).setResultsName("optionalItem") +
Literal(']').suppress()).setParseAction(foundOptionalGroup)
Sequence << Group(OneOrMore(MatchFirst([Grouping, OptionalGrouping, Expression]))). \
setResultsName("seq").setParseAction(foundSeq)
def nocomment(oldline):
"""
Removes a comment from a line
:param oldline: String representing the original line
:returns: String with the same semantic content, with the comments stripped
"""
if '//' in oldline:
        return oldline[:oldline.index('//')]
elif '*' in oldline:
        return ''
else:
return oldline
def getGrammarObject(fileStream):
"""
Produces a JSGFGrammar object from a stream of text, the grammar object has a set of public
rules and regular rules
:param fileStream: file object containing the contents of the grammar file
:type fileStream: file object
:returns: JSGFGrammar object
"""
linegenerator = fileStream
lines = linegenerator.readlines()
for i in range(len(lines)):
lines[i] = nocomment(lines[i])
# buffer will accumulate lines until a fully parseable piece is found
buffer = ""
grammar = gram.Grammar()
for line in lines:
buffer += line
match = next(StartSymbol.scanString(buffer), None)
while match:
tokens, start, end = match
if 'public' in tokens.keys():
grammar.addPublicRule(gram.Rule(tokens.identifier, list(tokens.ruleDef)))
grammar.addRule(gram.Rule(tokens.identifier, list(tokens.ruleDef)))
buffer = buffer[end:]
match = next(StartSymbol.scanString(buffer), None)
return grammar
if __name__ == '__main__':
fileStream = open(sys.argv[1])
grammar = getGrammarObject(fileStream)
print(grammar)
|
the-stack_106_21366
|
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
"""Functions that should be in scipy."""
import numpy as np
from scipy.sparse import issparse
def dot(a, b):
"""A dot product that also works on sparse matrices."""
if issparse(a):
return a.dot(b)
elif issparse(b):
return b.T.dot(a.T).T
else:
return np.dot(a, b)
def toarray(matrix, squeeze=True):
"""
This is a shame, this should be in scipy. Converts any matrix to ndarray.
"""
if issparse(matrix):
A = matrix.toarray()
elif isinstance(matrix, np.matrix):
A = matrix.A
else:
A = np.asanyarray(matrix)
return np.squeeze(A) if squeeze else A
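# Illustrative usage sketch (added for demonstration, not part of the original
# module): dot() and toarray() applied to a small scipy.sparse matrix. The
# shapes and values are arbitrary.
def _example_usage():
    from scipy.sparse import csr_matrix
    a = csr_matrix(np.eye(3))          # sparse 3x3 identity
    b = np.arange(9.0).reshape(3, 3)   # dense 3x3 matrix
    c = dot(a, b)                      # works whether either operand is sparse
    return toarray(c)                  # always comes back as a plain ndarray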
|
the-stack_106_21367
|
'''
Return the number of times that the string "hi"
appears anywhere in the given string.
'''
def count_hi(str):
count = 0
for i in range(len(str)-1):
if str[i] == "h" and str[i+1] == "i":
count += 1
return count
# count_hi('abc hi ho') โ 1
# count_hi('ABChi hi') โ 2
# count_hi('hihi') โ 2
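# Quick sanity-check sketch (added for illustration): for the pattern "hi",
# which cannot overlap with itself, the loop above agrees with str.count.
def _check_count_hi():
    for s in ('abc hi ho', 'ABChi hi', 'hihi'):
        assert count_hi(s) == s.count('hi')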
|
the-stack_106_21368
|
import logging
import zmq
import os
import json
from taboo.util import *
logger = logging.getLogger(__name__)
class TabooAttacker:
def __init__(self, host="127.0.0.1", port=10086, name="Attacker"):
self.name = name
self.context = zmq.Context()
self.connection = self.context.socket(zmq.REP)
self.connection.bind("tcp://%s:%d" % (host, port))
data = self.connection.recv_json()
if data["code"] == INIT:
logger.info("Connection to server established.")
self.task_setting = data["data"]
self.connection.send_json({
"code": ATTACKER_FEEDBACK,
"data": self.name
})
else:
logger.error("Unknown codes from server, raise error")
raise NotImplementedError
data = self.connection.recv_json()
if data["code"] == WORD_SELECT:
logger.info("Receive word list from judger: %s" % (json.dumps(data["data"])))
self.word_list = data["data"]
else:
logger.error("Unknown codes from server, raise error")
raise NotImplementedError
def get_task_setting(self):
return self.task_setting
def select_word(self, idx):
logger.info("Selects the word: %s" % self.word_list[idx])
self.connection.send_json({
"code": WORD_SELECT,
"data": idx
})
self.connection.recv_json()
def attack(self, sent):
self.connection.send_json({
"code": ATTACK,
"data": sent
})
data = self.connection.recv_json()
if data["code"] == DEFENDER_CORRUPT:
logger.info("Defender corrupts")
logging.info("Defender says: %s" % data["data"])
if data["code"] in END_CODE_SET:
if data["code"] == ATTACKER_WIN:
logger.info("Attacker wins")
elif data["code"] == DEFENDER_WIN:
logger.info("Defender wins")
elif data["code"] == DRAW:
logger.info("Draw")
return data
if __name__ == "__main__":
os.system("clear")
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
attacker = TabooAttacker(port=23333)
attacker.select_word(0)
while True:
print("Type your sentence: ", end='')
sent = input().strip()
data = attacker.attack(sent)
if data["code"] in END_CODE_SET:
break
|
the-stack_106_21369
|
#!/usr/bin/env python
"""Simple tools to query github.com and gather repo information.
Taken from ipython
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from packaging.version import parse
import json
import re
import sys
import os
import argparse
import operator
from datetime import datetime, timedelta
from subprocess import check_output
from urllib.request import urlopen, Request
# ----------------------------------------------------------------------------
# Globals
# ----------------------------------------------------------------------------
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
LAST_RELEASE = datetime(2015, 3, 18)
CONTRIBUTORS_FILE = "contributors.json"
GH_TOKEN = os.environ.get('GH_TOKEN', '')
# ----------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------
def fetch_url(url):
"""
Notes
-----
This was pointed out as a Security issue in bandit.
please look at issue #355,
we fixed it, but the bandit warning might remain,
need to suppress it manually (just ignore it)
"""
req = Request(url)
if GH_TOKEN:
req.add_header('Authorization', 'token {0}'.format(GH_TOKEN))
try:
print("fetching %s" % url, file=sys.stderr)
# url = Request(url,
# headers={'Accept': 'application/vnd.github.v3+json',
# 'User-agent': 'Defined'})
if not url.lower().startswith('http'):
msg = "Please make sure you use http/https connection"
raise ValueError(msg)
f = urlopen(req)
except Exception as e:
print(e)
print("return Empty data", file=sys.stderr)
return {}
return f
def parse_link_header(headers):
link_s = headers.get('link', '')
urls = element_pat.findall(link_s)
rels = rel_pat.findall(link_s)
d = {}
for rel, url in zip(rels, urls):
d[rel] = url
return d
def get_json_from_url(url):
"""Fetch and read url."""
f = fetch_url(url)
return json.load(f) if f else {}
def get_paged_request(url):
"""Get a full list, handling APIv3's paging."""
results = []
while url:
f = fetch_url(url)
if not f:
continue
results.extend(json.load(f))
links = parse_link_header(f.headers)
url = links.get('next')
return results
def get_issues(project="fury-gl/fury", state="closed", pulls=False):
"""Get a list of the issues from the Github API."""
which = 'pulls' if pulls else 'issues'
url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % \
(project, which, state, PER_PAGE)
return get_paged_request(url)
def get_tags(project="fury-gl/fury"):
"""Get a list of the tags from the Github API."""
url = "https://api.github.com/repos/{0}/tags".format(project)
return get_paged_request(url)
def fetch_basic_stats(project="fury-gl/fury"):
"""Fetch the basic stats.
Returns
-------
basic_stats : dict
A dictionary containing basic statistics. For example:
{ 'subscribers': 41,
'forks': 142,
'forks_url': 'https://github.com/fury-gl/fury/network'
'watchers': 94,
'open_issues': 154,
'stars': 94,
'stars_url': 'https://github.com/fury-gl/fury/stargazers'
}
"""
desired_keys = ["stargazers_count", "stargazers_url", "watchers_count",
"watchers_url", "forks_count", "forks_url", "open_issues",
"issues_url", "subscribers_count", "subscribers_url"]
url = "https://api.github.com/repos/{0}".format(project)
r_json = get_json_from_url(url)
basic_stats = dict((k, r_json[k]) for k in desired_keys if k in r_json)
return basic_stats
def fetch_contributor_stats(project="fury-gl/fury"):
"""Fetch stats of contributors.
Returns
-------
contributor_stats : dict
A dictionary containing contributor statistics. For example:
{'total_contributors': 50,
'total_commits': 6031,
'contributors': [ {
"user_name": "Garyfallidis"
"avatar_url":"https://avatars.githubusercontent.com/u/134276?v=3",
"html_url": "https://github.com/Garyfallidis",
"total_commits": 1389,
"total_additions": 116712,
"total_deletions": 70340,
"weekly_commits": [
{
"w": "1367712000",
"a": 6898,
"d": 77,
"c": 10
},
]
},
]
}
"""
url = "https://api.github.com/repos/{0}/stats/contributors".format(project)
r_json = get_json_from_url(url)
contributor_stats = {}
contributor_stats["total_contributors"] = len(r_json)
contributor_stats["contributors"] = []
cumulative_commits = 0
desired_keys = ["login", "avatar_url", "html_url"]
with open(os.path.join(os.path.dirname(__file__), "..",
CONTRIBUTORS_FILE)) as f:
extra_info = json.load(f)
extra_info = extra_info["team"] + extra_info["core_team"]
for contributor in r_json:
contributor_dict = dict((k, contributor["author"][k])
for k in desired_keys
if k in contributor["author"])
# check if "author" is null
if not contributor_dict["login"]:
continue
# Replace key name
contributor_dict["username"] = usrname = contributor_dict.pop("login")
contributor_dict["nb_commits"] = contributor["total"]
# Add extra information like fullname and affiliation
l_extra_info = [e for e in extra_info
if e["username"].lower() == usrname.lower()]
l_extra_info = l_extra_info[0] if l_extra_info else {}
contributor_dict.update(l_extra_info)
# Update total commits
cumulative_commits += contributor_dict["nb_commits"]
total_additions = 0
total_deletions = 0
for week in contributor["weeks"]:
total_additions += week['a']
total_deletions += week['d']
contributor_dict["total_additions"] = total_additions
contributor_dict["total_deletions"] = total_deletions
# contributor_dict["weekly_commits"] = contributor["weeks"]
contributor_stats["contributors"].insert(0, contributor_dict)
contributor_stats["contributors"] = sorted(
contributor_stats["contributors"],
key=lambda x: x.get('nb_commits'),
reverse=True)
contributor_stats["total_commits"] = cumulative_commits
return contributor_stats
def cumulative_contributors(project="fury-gl/fury", show=True):
"""Calculate total contributors as new contributors join with time.
Parameters
----------
contributors_list : list
List of contributors with weeks of contributions. Example:
[
{
'weeks': [
{'w': 1254009600, 'a': 5, 'c': 2, 'd': 9},
],
.....
},
]
"""
url = "https://api.github.com/repos/{0}/stats/contributors".format(project)
r_json = get_json_from_url(url)
contributors_join_date = {}
for contributor in r_json:
for week in contributor["weeks"]:
if(week["c"] > 0):
join_date = week['w']
if join_date not in contributors_join_date:
contributors_join_date[join_date] = 0
contributors_join_date[join_date] += 1
cumulative_join_date = {}
cumulative = 0
for time in sorted(contributors_join_date):
cumulative += contributors_join_date[time]
cumulative_join_date[time] = cumulative
cumulative_list = list(cumulative_join_date.items())
cumulative_list.sort()
if show:
from datetime import datetime
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
years, c_cum = zip(*cumulative_list)
years_ticks = np.linspace(min(years), max(years), 15)
years_labels = []
for y in years_ticks:
date = datetime.utcfromtimestamp(int(y))
date_str = "Q{} - ".format((date.month - 1) // 3 + 1)
date_str += date.strftime('%Y')
years_labels.append(date_str)
plt.fill_between(years, c_cum,
color="skyblue", alpha=0.4)
plt.plot(years, c_cum, color="Slateblue",
alpha=0.6, linewidth=2)
plt.tick_params(labelsize=12)
plt.xticks(years_ticks, years_labels, rotation=45, fontsize=8)
plt.yticks(fontsize=8)
plt.xlabel('Date', size=12)
plt.ylabel('Contributors', size=12)
plt.ylim(bottom=0)
plt.grid(True)
plt.legend([mpatches.Patch(color='skyblue'), ],
['Contributors', ], bbox_to_anchor=(0.5, 1.1),
loc='upper center')
plt.savefig('fury_cumulative_contributors.png', dpi=150)
plt.show()
return cumulative_list
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
"""Convert a list of issues to a dict, keyed by issue number."""
idict = {}
for i in issues:
idict[i['number']] = i
return idict
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return 'pull_request_url' in issue
def issues_closed_since(period=LAST_RELEASE, project="fury-gl/fury",
pulls=False):
"""Get all issues closed since a particular point in time.
period can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present.
"""
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
period = datetime.now() - period
url = ("https://api.github.com/repos/%s/%s?state=closed&sort=updated&"
"since=%s&per_page=%i") % (project, which,
period.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url)
# allclosed = get_issues(project=project, state='closed', pulls=pulls,
# since=period)
filtered = [i for i in allclosed
if _parse_datetime(i['closed_at']) > period]
# exclude rejected PRs
if pulls:
filtered = [pr for pr in filtered if pr['merged_at']]
return filtered
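# Illustrative sketch (never called here): `period` accepts either an absolute
# datetime or a relative timedelta. Both calls hit the live GitHub API, so this
# only documents the two calling forms.
def _example_issues_closed_since():
    merged_last_month = issues_closed_since(timedelta(days=30), pulls=True)
    closed_since_release = issues_closed_since(LAST_RELEASE, pulls=False)
    return merged_last_month, closed_since_release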
def sorted_by_field(issues, field='closed_at', reverse=False):
"""Return a list of issues sorted by closing date date."""
return sorted(issues, key=lambda i: i[field], reverse=reverse)
def report(issues, show_urls=False):
"""Summary report about a list of issues, printing number and title."""
# titles may have unicode in them, so we must encode everything below
if show_urls:
for i in issues:
role = 'ghpull' if 'merged_at' in i else 'ghissue'
print('* :%s:`%d`: %s' % (role, i['number'],
i['title']))
else:
for i in issues:
print('* %d: %s' % (i['number'], i['title']))
def get_all_versions(ignore='', project="fury-gl/fury"):
"""Return all releases version.
Parameters
----------
ignore: str
skip a version number between (default: '')
you can skip minor, or micro version number, it
will be replace by x
project: str
repo path
Returns
-------
l_version: list of str
versions number list
"""
tags = get_tags(project=project)
l_version = [t['name'] for t in tags]
if ignore.lower() in ['micro', 'minor']:
l_version = list(set([re.sub(r'(\d+)$', 'x', v) for v in l_version]))
if ignore.lower() == 'minor':
l_version = list(set([re.sub(r'\.(\d+)\.', '.x.', v)
for v in l_version]))
return l_version
def version_compare(current_version, version_number, op='eq',
all_versions=None):
"""Compare doc version. This is afilter for sphinx template."""
p = re.compile(r'(\d+)\.(\d+)')
d_operator = {'<': operator.lt,
'lt': operator.lt,
'<=': operator.le,
'le': operator.le,
'>': operator.gt,
'gt': operator.gt,
'>=': operator.ge,
'ge': operator.ge,
'==': operator.eq,
'=': operator.eq,
'eq': operator.eq,
'!=': operator.ne,
}
# Setup default value to op
if op not in d_operator.keys():
op = 'eq'
# check dev page
if current_version.lower() == 'dev':
return 'post' in version_number
# major and minor extraction
current = p.search(current_version)
ref = p.search(version_number)
if current_version.lower() == 'latest':
# Check if it is the latest release
all_versions = all_versions or get_all_versions()
if not all_versions:
return False
last_version = sorted(all_versions)[0]
last_version = p.search(last_version)
if parse(last_version.group()) == \
parse(ref.group()) and \
'post' not in version_number:
return True
return False
if 'post' in version_number:
return False
return d_operator[op](parse(current.group()),
parse(ref.group()))
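# Illustrative sketch of the filter semantics above (version strings are made
# up): the default 'eq' comparison only looks at the major.minor part, 'dev'
# matches post-releases, and the operator aliases map onto the operator module.
def _example_version_compare():
    assert version_compare('0.2', '0.2.1')             # 0.2 == 0.2
    assert version_compare('0.1', '0.2.1', op='<')     # 0.1 < 0.2
    assert version_compare('dev', '0.2.1.post1')       # dev page matches post releases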
def username_to_fullname(all_authors):
with open(os.path.join(os.path.dirname(__file__), "..",
CONTRIBUTORS_FILE)) as f:
extra_info = json.load(f)
extra_info = extra_info["team"] + extra_info["core_team"]
extra_info = {i["username"]: i['fullname'] for i in extra_info}
curent_authors = all_authors
for i, author in enumerate(all_authors):
if author[2:] in extra_info.keys():
curent_authors[i] = '* ' + extra_info[author[2:]]
return curent_authors
def github_stats(**kwargs):
"""Get release github stats."""
# Whether to add reST urls for all issues in printout.
show_urls = True
save = kwargs.get('save', None)
if save:
version = kwargs.get('version', 'vx.x.x')
fname = "releasev" + version + '.rst'
fpath = os.path.join(os.path.dirname(__file__), '..', 'release_notes',
fname)
f = open(fpath, 'w')
orig_stdout = sys.stdout
sys.stdout = f
print(".. _{}".format(fname.replace('.rst', ':')))
print()
print(("==============================\n"
" Release notes v{}\n ()"
"==============================").format(version))
# By default, search one month back
tag = kwargs.get('tag', None)
if tag is None:
if len(sys.argv) > 1:
try:
days = int(sys.argv[1])
except Exception:
tag = sys.argv[1]
else:
tag = check_output(['git', 'describe', '--abbrev=0'],
universal_newlines=True).strip()
if tag:
cmd = ['git', 'log', '-1', '--format=%ai', tag]
tagday, _ = check_output(cmd,
universal_newlines=True).strip().rsplit(' ',
1)
since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
else:
since = datetime.now() - timedelta(days=days)
print("fetching GitHub stats since %s (tag: %s)" % (since, tag),
file=sys.stderr)
# turn off to play interactively without redownloading, use %run -i
if 1:
issues = issues_closed_since(since, pulls=False)
pulls = issues_closed_since(since, pulls=True)
# For regular reports, it's nice to show them in reverse chronological
# order
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
# Print summary report we can directly include into release notes.
print()
since_day = since.strftime("%Y/%m/%d")
today = datetime.today().strftime("%Y/%m/%d")
print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
print()
print("These lists are automatically generated, and may be incomplete or"
" contain duplicates.")
print()
if tag:
# print git info, in addition to GitHub info:
since_tag = tag+'..'
cmd = ['git', 'log', '--oneline', since_tag]
ncommits = check_output(cmd, universal_newlines=True).splitlines()
ncommits = len(ncommits)
author_cmd = ['git', 'log', '--format=* %aN', since_tag]
all_authors = check_output(author_cmd, universal_newlines=True) \
.splitlines()
# Replace username by author name
all_authors = username_to_fullname(all_authors)
unique_authors = sorted(set(all_authors))
if len(unique_authors) == 0:
print("No commits during this period.")
else:
print("The following %i authors contributed %i commits." %
(len(unique_authors), ncommits))
print()
print('\n'.join(unique_authors))
print()
print()
print("We closed a total of %d issues, %d pull requests and %d"
" regular issues;\nthis is the full list (generated with"
" the script \n:file:`tools/github_stats.py`):" %
(n_total, n_pulls, n_issues))
print()
print('Pull Requests (%d):\n' % n_pulls)
report(pulls, show_urls)
print()
print('Issues (%d):\n' % n_issues)
report(issues, show_urls)
if save:
sys.stdout = orig_stdout
f.close()
# ----------------------------------------------------------------------------
# Sphinx connection
# ----------------------------------------------------------------------------
def add_jinja_filters(app):
app.builder.templates.environment.filters['version_compare'] = \
version_compare
def setup(app):
"""
- Create releases information
- Collect and clean authors
- Adds extra jinja filters.
"""
app.connect("builder-inited", add_jinja_filters)
app.add_css_file("css/custom_github.css")
# ----------------------------------------------------------------------------
# Main script
# ----------------------------------------------------------------------------
if __name__ == "__main__":
# e.g github_tools.py --tag=v0.1.3 --save --version=0.1.4
parser = argparse.ArgumentParser()
parser.add_argument("--tag", type=str, default=None,
help='from which tag version to get information')
parser.add_argument("--version", type=str, default='',
help='current release version')
parser.add_argument("--save", dest='save', action='store_true',
default=False, help=("Save in the release folder"
"and add rst header")
)
args = parser.parse_args()
github_stats(**vars(args))
|
the-stack_106_21370
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: Luke Campagnola
# -----------------------------------------------------------------------------
import struct
import zlib
import numpy as np
def make_png(data, level=6):
"""Convert numpy array to PNG byte array.
Parameters
----------
data : numpy.ndarray
Data must be (H, W, 3 | 4) with dtype = np.ubyte (np.uint8)
level : int
https://docs.python.org/2/library/zlib.html#zlib.compress
An integer from 0 to 9 controlling the level of compression:
1 is fastest and produces the least compression,
9 is slowest and produces the most.
0 is no compression.
The default value is 6.
Returns
-------
png : array
PNG formatted array
"""
# Eventually we might want to use ext/png.py for this, but this
    # routine *should* be faster b/c it's specialized for our use case
def mkchunk(data, name):
if isinstance(data, np.ndarray):
size = data.nbytes
else:
size = len(data)
chunk = np.empty(size + 12, dtype=np.ubyte)
chunk.data[0:4] = np.array(size, '>u4').tostring()
chunk.data[4:8] = name.encode('ASCII')
chunk.data[8:8 + size] = data
# and-ing may not be necessary, but is done for safety:
# https://docs.python.org/3/library/zlib.html#zlib.crc32
chunk.data[-4:] = np.array(zlib.crc32(chunk[4:-4]) & 0xffffffff,
'>i4').tostring()
return chunk
if data.dtype != np.ubyte:
raise TypeError('data.dtype must be np.ubyte (np.uint8)')
dim = data.shape[2] # Dimension
if dim not in (3, 4):
raise TypeError('data.shape[2] must be in (3, 4)')
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
if dim == 4:
ctyp = 0b0110 # RGBA
else:
ctyp = 0b0010 # RGB
# www.libpng.org/pub/png/spec/1.2/PNG-Structure.html
header = b'\x89PNG\x0d\x0a\x1a\x0a' # header
h, w = data.shape[:2]
depth = data.itemsize * 8
ihdr = struct.pack('!IIBBBBB', w, h, depth, ctyp, 0, 0, 0)
c1 = mkchunk(ihdr, 'IHDR')
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IDAT
# insert filter byte at each scanline
idat = np.empty((h, w * dim + 1), dtype=np.ubyte)
idat[:, 1:] = data.reshape(h, w * dim)
idat[:, 0] = 0
comp_data = zlib.compress(idat, level)
c2 = mkchunk(comp_data, 'IDAT')
c3 = mkchunk(np.empty((0,), dtype=np.ubyte), 'IEND')
# concatenate
lh = len(header)
png = np.empty(lh + c1.nbytes + c2.nbytes + c3.nbytes, dtype=np.ubyte)
png.data[:lh] = header
p = lh
for chunk in (c1, c2, c3):
png[p:p + len(chunk)] = chunk
p += chunk.nbytes
return png
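# Illustrative usage sketch (not part of the original module). Note that
# make_png() relies on ndarray.tostring(), which only exists on older NumPy
# releases, so treat this as a sketch rather than a guaranteed-runnable snippet
# on current NumPy.
def _example_write_png(path='checker.png'):
    # 64x64 black/white checkerboard, replicated into 3 RGB channels
    tile = np.kron([[0, 255], [255, 0]], np.ones((32, 32))).astype(np.ubyte)
    img = np.dstack([tile, tile, tile])
    png = make_png(img)
    with open(path, 'wb') as fh:
        fh.write(png.tobytes())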
|
the-stack_106_21371
|
"""NodeCHUP.
Usage:
nodechup install --base-directory-path=<base-directory-path> --version=<version> [--update-default-version]
Options:
-h --help Show this screen.
--base-directory-path=<base-directory-path> Path to base directory
--version=<version> Version of Node.js installation. Exits with RC 78 if Node.js version is already installed.
--update-default-version Update default point release for major/minor version.
"""
import sys
from docopt import docopt
from schema import Schema
from nodechup.nodejs import BaseDirectory, Installation, NodeJSAlreadyInstalled
"""Program to manage Node.js installations."""
def main() -> None:
"""Spawn relevant class for CLI function."""
# Validate input
args = docopt(__doc__)
schema = Schema(
{
"install": bool,
"--base-directory-path": str,
"--version": str,
"--update-default-version": bool,
}
)
args = schema.validate(args)
# Run classes
if args["install"]:
base_directory = BaseDirectory(path=args["--base-directory-path"])
installation = Installation(
base_directory=base_directory, version=args["--version"]
)
try:
installation.download(
update_default_version=args["--update-default-version"]
)
except NodeJSAlreadyInstalled:
print(
"Node.js installation with the specified version already exists in the base directory"
)
sys.exit(78)
|
the-stack_106_21374
|
# -*- coding: utf-8 -*-
import app_automatico
import sys
from layout import *
from PyQt4 import *
from layout_2 import *
class resultadoValidacao(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_resultadoValidacao()
self.ui.setupUi(self)
def nomeFazenda(self):
self.nome = "Fazenda TESTE Tracajรกs";
def areaCar(self):
self.area = "33517020";
def numeroCar(self):
self.numero = "997B3";
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
meuApp = resultadoValidacao()
meuApp.show()
sys.exit(app.exec_())
|
the-stack_106_21375
|
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Message:
def __init__(self):
self.messages = []
def add_unique(self, item, priority=-1):
msg = {"text": item, "priority": priority}
if msg not in self.messages:
self.messages.append(msg)
def add(self, item, priority=-1):
self.messages.append({"text": item, "priority": priority})
def to_html(self):
if len(self.messages) == 0:
return ""
else:
sorted_msgs = sorted(self.messages, key=lambda i: i["priority"], reverse=True)
html = "<ul>"
for msg in sorted_msgs:
msgTxt = msg["text"]
html += f"<li>{msgTxt}</li>"
html += "</ul>"
return html
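# Illustrative usage sketch (hypothetical message text): add_unique() drops
# exact duplicates and to_html() renders items in descending priority order.
if __name__ == "__main__":
    msg = Message()
    msg.add_unique("Dropped 3 rows with missing values", priority=1)
    msg.add_unique("Dropped 3 rows with missing values", priority=1)  # duplicate, ignored
    msg.add("Column 'id' treated as an identifier", priority=0)
    print(msg.to_html())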
|
the-stack_106_21376
|
# import xml.etree.ElementTree as ET
from lxml import etree as ET
import Helper
inertiaKey = ['ixx', 'ixy', 'ixz', 'iyy', 'iyz', 'izz']
# add a material
def addMaterial(parent, name, rgba):
material = ET.SubElement(parent, 'material')
material.set('name', name)
color = ET.SubElement(material, 'color')
    color.set('rgba', f'{rgba[0]} {rgba[1]} {rgba[2]} {rgba[3]}')
def addLink(name, root, massValue, inertiaValue, size=None, materialName=None):
# add link
link = ET.SubElement(robot, 'link')
link.set('name', name)
# add inertia
inertial = ET.SubElement(link, 'inertial')
mass = ET.SubElement(inertial, 'mass')
mass.set('value', f'{massValue}')
inertia = ET.SubElement(inertial, 'inertia')
for i in range(6):
inertia.set(inertiaKey[i], f'{inertiaValue[i]}')
# add visual and collision
if size is not None:
visual = ET.SubElement(link, 'visual')
        ET.SubElement(visual, 'material', attrib={'name': materialName})
geometry = ET.SubElement(visual, 'geometry')
ET.SubElement(geometry, 'box', attrib={'size': f'{size[0]} {size[1]} {size[2]}'})
collision = ET.SubElement(link, 'collision')
geometry = ET.SubElement(collision, 'geometry')
ET.SubElement(geometry, 'box', attrib={'size': f'{size[0]} {size[1]} {size[2]}'})
return link
# add joint
def addFixedJoint(root, parent, child, xyz, rpy):
joint = ET.SubElement(root, 'joint', attrib={'name': f'{parent}_to_{child}', 'type': 'fixed'})
ET.SubElement(joint, 'origin', attrib={'xyz': f'{xyz[0]} {xyz[1]} {xyz[2]}', 'rpy': f'{rpy[0]} {rpy[1]} {rpy[2]}'})
ET.SubElement(joint, 'parent', attrib={'link': parent})
ET.SubElement(joint, 'child', attrib={'link': child})
def addObstacle(name, size, root, parent, pos, rot, materialName='grey'):
addLink(name, root, massValue=1.0, inertiaValue=[1.0, 0.0, 0.0, 1.0, 0.0, 1.0], size=size, materialName=materialName)
addFixedJoint(root, parent, name, pos, rot)
if __name__ == "__main__":
robot = ET.Element('robot')
robot.set('name', 'forest')
materialName = 'gray'
rgba = [0.3, 0.3, 0.3, 1.0]
addMaterial(robot, materialName, rgba)
materialName = 'gold'
rgba = [0.0, 0.8, 0.0, 1.0]
addMaterial(robot, materialName, rgba)
materialName = 'deep'
rgba = [0.0, 0.9, 0.9, 1.0]
addMaterial(robot, materialName, rgba)
# add a base
name = 'base'
massValue = 0.0
inertiaValue = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
base = addLink(name, robot, massValue, inertiaValue)
pos_ = []
size_ = []
rot_ = []
material_ = []
pos_.append([-11, 0.9, -7])
size_.append([2, 2, 2])
pos_.append([-11, 0.9, 3])
size_.append([2, 2, 10])
pos_.append([-5, 0.9, -4])
size_.append([2, 2, 8])
pos_.append([-5, 0.9, 5])
size_.append([2, 2, 6])
pos_.append([-1, 0.9, 6])
size_.append([2, 2, 4])
pos_.append([1, 0.9, -3])
size_.append([2, 2, 6])
pos_.append([7, 0.9, -2])
size_.append([2, 2, 12])
pos_.append([7, 0.9, 7])
size_.append([2, 2, 2])
pos_.append([11, 0.9, 6])
size_.append([2, 2, 4])
pos_.append([11, 0.9, -1.25])
size_.append([2, 2, 9.5])
for i in range(len(size_)):
rot_.append([0, 0, 0])
material_.append('gray')
# golden box
gap = 0.6
length = 1.5*2**0.5 - gap/2
size_.append([length, 2, length])
size_.append([length, 2, length])
rot_.append([0, 1.57/2, 0])
rot_.append([0, 1.57/2, 0])
material_.append('gold')
material_.append('gold')
pos_.append([-1, 0.9, 4-length/2**0.5])
pos_.append([1, 0.9, 0+length/2**0.5])
# upper and lower boundary
size_.append([40, 2, 2])
size_.append([40, 2, 2])
rot_.append([0, 0, 0])
rot_.append([0, 0, 0])
material_.append('deep')
material_.append('deep')
pos_.append([0, 0.9, -9])
pos_.append([0, 0.9, 9])
for i in range(len(size_)):
addObstacle(f'obstacle_{i}', size_[i], robot, parent='base', pos=pos_[i], rot=rot_[i], materialName=material_[i])
tree = ET.ElementTree(robot)
tree.write(Helper.findURDF('forest.urdf'), pretty_print=True, encoding='utf-8')
|
the-stack_106_21379
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from itertools import count
from flask import flash, jsonify, request, session
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import get_default_values
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.layout import layout_settings, logger
from indico.modules.events.layout.forms import MenuBuiltinEntryForm, MenuLinkForm, MenuPageForm
from indico.modules.events.layout.models.menu import EventPage, MenuEntry, MenuEntryType
from indico.modules.events.layout.util import menu_entries_for_event
from indico.modules.events.layout.views import WPMenuEdit, WPPage
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.models.events import EventType
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.forms.base import FormDefaults
from indico.web.util import jsonify_data, jsonify_form
def _render_menu_entry(entry):
tpl = get_template_module('events/layout/_menu.html')
return tpl.menu_entry(entry=entry)
def _render_menu_entries(event, connect_menu=False):
tpl = get_template_module('events/layout/_menu.html')
return tpl.menu_entries(menu_entries_for_event(event), connect_menu=connect_menu)
class RHMenuBase(RHManageEventBase):
def _check_access(self):
RHManageEventBase._check_access(self)
if self.event.type_ != EventType.conference:
raise NotFound('Only conferences have a menu')
def _require_custom_menu(self):
if not layout_settings.get(self.event, 'use_custom_menu'):
raise Forbidden('The menu cannot be changed unless menu customization is enabled')
class RHMenuEdit(RHMenuBase):
def _process(self):
custom_menu_enabled = layout_settings.get(self.event, 'use_custom_menu')
menu = menu_entries_for_event(self.event) if custom_menu_enabled else None
return WPMenuEdit.render_template('menu_edit.html', self.event, menu=menu,
custom_menu_enabled=custom_menu_enabled)
class RHMenuToggleCustom(RHMenuBase):
def _process(self):
enabled = request.form['enabled'] == '1'
if enabled:
# nothing else to do here. menu items are added to the DB when retrieving the menu
flash(_('Menu customization has been enabled.'), 'success')
else:
for entry in MenuEntry.query.with_parent(self.event):
db.session.delete(entry)
flash(_('Menu customization has been disabled.'), 'success')
layout_settings.set(self.event, 'use_custom_menu', enabled)
logger.info('Menu customization for %s %s by %s', self.event, 'enabled' if enabled else 'disabled',
session.user)
return jsonify(enabled=enabled)
class RHMenuEntryEditBase(RHMenuBase):
normalize_url_spec = {
'locators': {
lambda self: self.entry
}
}
def _check_access(self):
RHMenuBase._check_access(self)
self._require_custom_menu()
def _process_args(self):
RHMenuBase._process_args(self)
self.entry = MenuEntry.get_or_404(request.view_args['menu_entry_id'])
class RHMenuEntryEdit(RHMenuEntryEditBase):
def _process(self):
defaults = FormDefaults(self.entry)
if self.entry.is_user_link:
form_cls = MenuLinkForm
elif self.entry.is_page:
form_cls = MenuPageForm
defaults['html'] = self.entry.page.html
else:
form_cls = MenuBuiltinEntryForm
defaults = FormDefaults(self.entry, skip_attrs={'title'},
title=self.entry.title or self.entry.default_data.title,
custom_title=self.entry.title is not None)
form = form_cls(entry=self.entry, obj=defaults)
if form.validate_on_submit():
form.populate_obj(self.entry, skip={'html', 'custom_title'})
if self.entry.is_page:
self.entry.page.html = form.html.data
return jsonify_data(entry=_render_menu_entry(self.entry))
return jsonify_form(form)
class RHMenuEntryPosition(RHMenuEntryEditBase):
def _process(self):
position = request.form.get('position')
try:
position = int(position)
except (TypeError, ValueError):
position = None
parent_id = request.form.get('parent_id')
try:
parent_id = int(parent_id)
except (TypeError, ValueError):
parent_id = None
if parent_id != self.entry.parent_id:
if self.entry.type not in {MenuEntryType.user_link, MenuEntryType.page}:
raise BadRequest('Menu entry "{0}" cannot be moved to another menu: Invalid type "{0.type.name}".'
.format(self.entry))
if self.entry.is_root and self.entry.children:
raise BadRequest('Menu entry "{0}" cannot be moved to another menu: Entry has nested entries.'
.format(self.entry))
parent_entry = None
if parent_id is not None:
parent_entry = (MenuEntry.query.with_parent(self.event)
.filter(MenuEntry.type.in_({MenuEntryType.user_link, MenuEntryType.page}),
MenuEntry.id == parent_id,
MenuEntry.parent_id.is_(None))
.first())
if not parent_entry:
raise BadRequest('New parent entry not found for Menu entry "{0}".'.format(self.entry))
self.entry.insert(parent_entry, position)
else:
self.entry.move(position)
return jsonify_data(flash=False)
class RHMenuEntryToggleEnabled(RHMenuEntryEditBase):
def _process(self):
self.entry.is_enabled = not self.entry.is_enabled
return jsonify(is_enabled=self.entry.is_enabled)
class RHMenuEntryToggleDefault(RHMenuEntryEditBase):
def _process(self):
if self.entry.type != MenuEntryType.page:
raise BadRequest
if self.event.default_page == self.entry.page:
is_default = False
self.event.default_page = None
else:
is_default = True
self.event.default_page = self.entry.page
return jsonify(is_default=is_default)
class RHMenuAddEntry(RHMenuBase):
def _check_access(self):
RHMenuBase._check_access(self)
self._require_custom_menu()
def _process(self):
defaults = FormDefaults(get_default_values(MenuEntry))
entry_type = request.args['type']
if entry_type == MenuEntryType.separator.name:
entry = MenuEntry(event=self.event, type=MenuEntryType.separator)
db.session.add(entry)
db.session.flush()
return jsonify_data(flash=False, entry=_render_menu_entry(entry))
elif entry_type == MenuEntryType.user_link.name:
form_cls = MenuLinkForm
elif entry_type == MenuEntryType.page.name:
form_cls = MenuPageForm
else:
raise BadRequest
form = form_cls(obj=defaults)
if form.validate_on_submit():
entry = MenuEntry(event=self.event, type=MenuEntryType[entry_type])
form.populate_obj(entry, skip={'html'})
if entry.is_page:
page = EventPage(html=form.html.data)
self.event.custom_pages.append(page)
entry.page = page
db.session.add(entry)
db.session.flush()
return jsonify_data(entry=_render_menu_entry(entry))
return jsonify_form(form)
class RHMenuDeleteEntry(RHMenuEntryEditBase):
def _process(self):
if self.entry.type not in (MenuEntryType.user_link, MenuEntryType.page, MenuEntryType.separator):
raise BadRequest('Menu entry of type {} cannot be deleted'.format(self.entry.type.name))
position_gen = count(self.entry.position)
if self.entry.children:
for child in self.entry.children:
child.parent_id = self.entry.parent_id
child.position = next(position_gen)
with db.session.no_autoflush:
entries = (MenuEntry.query.with_parent(self.event)
.filter(MenuEntry.parent_id == self.entry.parent_id,
MenuEntry.position >= self.entry.position,
MenuEntry.id != self.entry.id)
.order_by(MenuEntry.position)
.all())
for entry in entries:
entry.position = next(position_gen)
db.session.delete(self.entry)
db.session.flush()
return jsonify_data(flash=False, menu=_render_menu_entries(self.event, connect_menu=True))
class RHPageDisplay(RHDisplayEventBase):
normalize_url_spec = {
'locators': {
lambda self: self.page
}
}
def _process_args(self):
RHDisplayEventBase._process_args(self)
self.page = EventPage.get_or_404(request.view_args['page_id'])
def _process(self):
return WPPage.render_template('page.html', self.event, page=self.page)
|
the-stack_106_21381
|
#!/usr/bin/env python
################################################################################
# MIT License
#
# Copyright (c) 2021 Yoshifumi Asakura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
### to define functions to run on the ode
################
### imports
################
import numpy as np
import time
import sys
import os
import shutil
import pyper
import math
import pandas as pd
################
### 2D comarinson
################
class Compare2D:
def __init__(self, R_lag, R_part):
self.r = pyper.R()
comg = [
"library(tidyverse)",
"library(gganimate)",
"load('%s')" % R_lag,
"load('%s')" % R_part,
"df_p2$model <- 'particle'",
"df_l2$model <- 'continuum'",
"df <- rbind(df_l2, df_p2)",
#"len_anim <- length(unique(df$time)) * 0.1"
"numframes <- as.integer(length(unique(df$time)) / 2)"
]
self.r("\n".join(comg))
print("2D comparison initialized")
#
#
def draw(self, moviefile, lastframe = None):
renderer = "ffmpeg_renderer()"
comg = [
#"print(ls())",
"g000 <- ggplot(df, aes(x, y)) + geom_path(",
" aes(group = cell)",
") + theme_classic(",
") + coord_fixed(",
" ratio = 1.0",
") + transition_manual(",
" time_f",
") + labs(",
" title = 'time: {current_frame}'",
")",
"anim_save(",
" filename = '%s'," % moviefile,
" animation = g000,",
" renderer = %s," % renderer,
#" duration = len_anim,",
" nframes = numframes,",
" fps = 20,",
" width = 160, height = 160, unit = 'mm', res = 360",
")"
]
print("start rendering...")
#
#print(self.r("\n".join(comg)))
self.r("\n".join(comg))
#
if os.path.exists(moviefile):
print("saved %s" % moviefile)
else:
print("failed %s" % moviefile)
#
if not lastframe is None:
comg = [
"g000 <- ggplot(subset(df, time == max(df$time)), aes(x, y)) + geom_path(",
" aes(group = cell)",
") + theme_classic(",
") + coord_fixed(",
" ratio = 1.0",
") + labs(",
" title = 'difference between two models'",
")",
"ggsave(plot = g000, file = '%s', width = 160, height = 160, unit = 'mm')" % lastframe
]
self.r("\n".join(comg))
#
################
###
################
################
###
################
################
###
################
###
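# Illustrative sketch only (not part of the original script): Compare2D drives
# an embedded R session, so running it needs R with tidyverse/gganimate plus
# ffmpeg, and the two .RData paths below are hypothetical placeholders.
def _example_compare():
    cmp2d = Compare2D("lagrange_result.RData", "particle_result.RData")
    cmp2d.draw("comparison.mp4", lastframe="comparison_last.png")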
|
the-stack_106_21383
|
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the process_util.py module."""
from __future__ import print_function
import os
import signal
from chromite.lib import cros_test_lib
from chromite.lib import process_util
def _SpawnChild(exit_code=None, kill_signal=None):
"""Create a child, have it exit/killed, and return its status."""
assert exit_code is not None or kill_signal is not None
pid = os.fork()
if pid == 0:
# Make sure this child never returns.
while True:
if exit_code is not None:
# pylint: disable=W0212
os._exit(exit_code)
else:
os.kill(os.getpid(), kill_signal)
return os.waitpid(pid, 0)[1]
class GetExitStatusTests(cros_test_lib.TestCase):
"""Tests for GetExitStatus()"""
def testExitNormal(self):
"""Verify normal exits get decoded."""
status = _SpawnChild(exit_code=0)
ret = process_util.GetExitStatus(status)
self.assertEqual(ret, 0)
def testExitError(self):
"""Verify error exits (>0 && <128) get decoded."""
status = _SpawnChild(exit_code=10)
ret = process_util.GetExitStatus(status)
self.assertEqual(ret, 10)
def testExitWeird(self):
"""Verify weird exits (>=128) get decoded."""
status = _SpawnChild(exit_code=150)
ret = process_util.GetExitStatus(status)
self.assertEqual(ret, 150)
def testSIGUSR1(self):
"""Verify normal kill signals get decoded."""
status = _SpawnChild(kill_signal=signal.SIGUSR1)
ret = process_util.GetExitStatus(status)
self.assertEqual(ret, 128 + signal.SIGUSR1)
def testSIGKILL(self):
"""Verify harsh signals get decoded."""
status = _SpawnChild(kill_signal=signal.SIGKILL)
ret = process_util.GetExitStatus(status)
self.assertEqual(ret, 128 + signal.SIGKILL)
class ExitAsStatusTests(cros_test_lib.TestCase):
"""Tests for ExitAsStatus()"""
def _Tester(self, exit_code=None, kill_signal=None):
"""Helper func for testing ExitAsStatus()
Create a child to mimic the grandchild.
Create a grandchild and have it exit/killed.
Assert behavior based on exit/signal behavior.
"""
pid = os.fork()
if pid == 0:
# Let the grandchild exit/kill itself.
# The child should mimic the grandchild.
status = _SpawnChild(exit_code=exit_code, kill_signal=kill_signal)
try:
process_util.ExitAsStatus(status)
except SystemExit as e:
# pylint: disable=W0212
os._exit(e.code)
raise AssertionError('ERROR: should have exited!')
# The parent returns the child's status.
status = os.waitpid(pid, 0)[1]
if exit_code is not None:
self.assertFalse(os.WIFSIGNALED(status))
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(os.WEXITSTATUS(status), exit_code)
else:
self.assertFalse(os.WIFEXITED(status))
self.assertTrue(os.WIFSIGNALED(status))
self.assertEqual(os.WTERMSIG(status), kill_signal)
def testExitNormal(self):
"""Verify normal exits get decoded."""
self._Tester(exit_code=0)
def testExitError(self):
"""Verify error exits (>0 && <128) get decoded."""
self._Tester(exit_code=10)
def testExitWeird(self):
"""Verify weird exits (>=128) get decoded."""
self._Tester(exit_code=150)
def testSIGUSR1(self):
"""Verify normal kill signals get decoded."""
self._Tester(kill_signal=signal.SIGUSR1)
def testSIGKILL(self):
"""Verify harsh signals get decoded."""
self._Tester(kill_signal=signal.SIGKILL)
|
the-stack_106_21385
|
from pathlib import Path
import os
import sys
import click
from ploomber.spec import DAGSpec
from ploomber import __version__
from ploomber import cli as cli_module
from ploomber import scaffold as _scaffold
from ploomber_scaffold import scaffold as scaffold_project
from ploomber.telemetry import telemetry
@click.group()
@click.version_option(version=__version__)
def cli():
"""Ploomber command line interface.
"""
pass # pragma: no cover
@cli.command()
@click.option(
'--conda',
is_flag=True,
    help='Use conda (environment.yml)',
)
@click.option(
'--package',
is_flag=True,
help='Use package template (setup.py)',
)
@click.option(
'--empty',
is_flag=True,
help='Create a sample pipeline.yaml with no tasks',
)
@click.option(
'--entry-point',
'-e',
default=None,
help='Entry point to add tasks. Invalid if other flags present',
)
def scaffold(conda, package, entry_point, empty):
"""Create new projects (if no pipeline.yaml exists) or add missings tasks
"""
template = '-e/--entry-point is not compatible with the {flag} flag'
if entry_point and conda:
err = template.format(flag='--conda')
telemetry.log_api("scaffold_error",
metadata={
'type': 'entry_and_conda_flag',
'exception': err,
'argv': sys.argv
})
raise click.ClickException(err)
if entry_point and package:
err = template.format(flag='--package')
telemetry.log_api("scaffold_error",
metadata={
'type': 'entry_and_package_flag',
'exception': err,
'argv': sys.argv
})
raise click.ClickException(err)
if entry_point and empty:
err = template.format(flag='--empty')
telemetry.log_api("scaffold_error",
metadata={
'type': 'entry_and_empty_flag',
'exception': err,
'argv': sys.argv
})
raise click.ClickException(err)
# try to load a dag by looking in default places
if entry_point is None:
loaded = _scaffold.load_dag()
else:
try:
loaded = (
DAGSpec(entry_point, lazy_import='skip'),
Path(entry_point).parent,
Path(entry_point),
)
except Exception as e:
telemetry.log_api("scaffold_error",
metadata={
'type': 'dag_load_failed',
'exception': e,
'argv': sys.argv
})
raise click.ClickException(e) from e
if loaded:
# existing pipeline, add tasks
spec, _, path_to_spec = loaded
_scaffold.add(spec, path_to_spec)
telemetry.log_api("ploomber_scaffold",
dag=loaded,
metadata={
'type': 'add_task',
'argv': sys.argv
})
else:
# no pipeline, create base project
telemetry.log_api("ploomber_scaffold",
metadata={
'type': 'base_project',
'argv': sys.argv
})
scaffold_project.cli(project_path=None,
conda=conda,
package=package,
empty=empty)
@cli.command()
@click.option('-l',
'--use-lock',
help='Use lock files',
default=False,
is_flag=True)
def install(use_lock):
"""Install dependencies and package
"""
cli_module.install.main(use_lock=use_lock)
@cli.command()
@click.option('-n', '--name', help='Example to download', default=None)
@click.option('-f', '--force', help='Force examples download', is_flag=True)
@click.option('-o', '--output', help='Target directory', default=None)
@click.option('-b', '--branch', help='Git branch to use.', default=None)
def examples(name, force, branch, output):
"""Get sample projects. Run "ploomber examples" to list them
"""
try:
cli_module.examples.main(name=name,
force=force,
branch=branch,
output=output)
except click.ClickException:
raise
except Exception as e:
telemetry.log_api("examples_error",
metadata={
'type': 'runtime_error',
'exception': e,
'argv': sys.argv
})
raise RuntimeError(
'An error happened when executing the examples command. Check out '
'the full error message for details. Downloading the examples '
'again or upgrading Ploomber may fix the '
'issue.\nDownload: ploomber examples -f\n'
'Update: pip install ploomber -U\n'
'Update [conda]: conda update ploomber -c conda-forge') from e
def _exit_with_error_message(msg):
click.echo(msg, err=True)
sys.exit(2)
def cmd_router():
cmd_name = None if len(sys.argv) < 2 else sys.argv[1]
custom = {
'build': cli_module.build.main,
'plot': cli_module.plot.main,
'task': cli_module.task.main,
'report': cli_module.report.main,
'interact': cli_module.interact.main,
'status': cli_module.status.main,
'nb': cli_module.nb.main,
}
# users may attempt to run execute/run, suggest to use build instead
# users may make typos when running one of the commands
# suggest correct spelling on obvious typos
alias = {
'execute': 'build',
'run': 'build',
'bulid': 'build',
'buld': 'build',
'bild': 'build',
'uild': 'build',
'buil': 'build',
'example': 'examples',
'exemples': 'examples',
'exmples': 'examples',
'exampes': 'examples',
'tsk': 'task',
'tas': 'task',
'rport': 'report',
'reprt': 'report',
'repor': 'report',
'stat': 'status',
'stats': 'status',
'satus': 'status',
'inteact': 'interact',
'interat': 'interact'
}
if cmd_name in custom:
# NOTE: we don't use the argument here, it is parsed by _main
# pop the second element ('entry') to make the CLI behave as expected
sys.argv.pop(1)
# Add the current working directory, this is done automatically when
# calling "python -m ploomber.build" but not here ("ploomber build")
sys.path.insert(0, os.path.abspath('.'))
fn = custom[cmd_name]
fn()
elif cmd_name in alias:
suggestion = alias[cmd_name]
telemetry.log_api("unsupported_build_cmd",
metadata={
'cmd_name': cmd_name,
'suggestion': suggestion,
'argv': sys.argv
})
_exit_with_error_message("Try 'ploomber --help' for help.\n\n"
f"Error: {cmd_name!r} is not a valid command."
f" Did you mean {suggestion!r}?")
else:
if cmd_name not in ['examples', 'scaffold', 'install']:
telemetry.log_api("unsupported-api-call",
metadata={'argv': sys.argv})
cli()
# the commands below are handled by the router,
# those are a place holder to show up in ploomber --help
@cli.command()
def build():
"""Build pipeline
"""
pass # pragma: no cover
@cli.command()
def status():
"""Show pipeline status
"""
pass # pragma: no cover
@cli.command()
def plot():
"""Plot pipeline
"""
pass # pragma: no cover
@cli.command()
def task():
"""Interact with specific tasks
"""
pass # pragma: no cover
@cli.command()
def report():
"""Make a pipeline report
"""
pass # pragma: no cover
@cli.command()
def interact():
"""Start an interactive session (use the "dag" variable)
"""
pass # pragma: no cover
@cli.command()
def nb():
"""Manage scripts and notebooks
"""
pass # pragma: no cover
|
the-stack_106_21386
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import socket
import time
import traceback
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import websocket
from tracing.trace_data import trace_data as trace_data_module
class TracingUnsupportedException(Exception):
pass
class TracingTimeoutException(Exception):
pass
class TracingUnrecoverableException(Exception):
pass
class TracingHasNotRunException(Exception):
pass
class TracingUnexpectedResponseException(Exception):
pass
class ClockSyncResponseException(Exception):
pass
class _DevToolsStreamReader(object):
def __init__(self, inspector_socket, stream_handle):
self._inspector_websocket = inspector_socket
self._handle = stream_handle
self._trace_file_handle = None
self._callback = None
def Read(self, callback):
# Do not allow the instance of this class to be reused, as
# we only read data sequentially at the moment, so a stream
# can only be read once.
assert not self._callback
self._trace_file_handle = trace_data_module.TraceFileHandle()
self._trace_file_handle.Open()
self._callback = callback
self._ReadChunkFromStream()
# The below is not a typo -- queue one extra read ahead to avoid latency.
self._ReadChunkFromStream()
def _ReadChunkFromStream(self):
# Limit max block size to avoid fragmenting memory in sock.recv(),
# (see https://github.com/liris/websocket-client/issues/163 for details)
req = {'method': 'IO.read', 'params': {
'handle': self._handle, 'size': 32768}}
self._inspector_websocket.AsyncRequest(req, self._GotChunkFromStream)
def _GotChunkFromStream(self, response):
# Quietly discard responses from reads queued ahead after EOF.
if self._trace_file_handle is None:
return
if 'error' in response:
raise TracingUnrecoverableException(
'Reading trace failed: %s' % response['error']['message'])
result = response['result']
    # Convert the trace data that's received as UTF32 to its native encoding of
    # UTF8 in order to reduce its size.
self._trace_file_handle.AppendTraceData(result['data'].encode('utf8'))
if not result.get('eof', False):
self._ReadChunkFromStream()
return
req = {'method': 'IO.close', 'params': {'handle': self._handle}}
self._inspector_websocket.SendAndIgnoreResponse(req)
self._trace_file_handle.Close()
self._callback(self._trace_file_handle)
self._trace_file_handle = None
class TracingBackend(object):
_TRACING_DOMAIN = 'Tracing'
def __init__(self, inspector_socket, is_tracing_running=False,
support_modern_devtools_tracing_start_api=False):
self._inspector_websocket = inspector_socket
self._inspector_websocket.RegisterDomain(
self._TRACING_DOMAIN, self._NotificationHandler)
self._is_tracing_running = is_tracing_running
self._start_issued = False
self._can_collect_data = False
self._has_received_all_tracing_data = False
# pylint: disable=invalid-name
self._support_modern_devtools_tracing_start_api = (
support_modern_devtools_tracing_start_api)
self._trace_data_builder = None
@property
def is_tracing_running(self):
return self._is_tracing_running
def StartTracing(self, chrome_trace_config, timeout=10):
"""When first called, starts tracing, and returns True.
If called during tracing, tracing is unchanged, and it returns False.
"""
if self.is_tracing_running:
return False
assert not self._can_collect_data, 'Data not collected from last trace.'
# Reset collected tracing data from previous tracing calls.
if not self.IsTracingSupported():
raise TracingUnsupportedException(
'Chrome tracing not supported for this app.')
params = {'transferMode': 'ReturnAsStream'}
if self._support_modern_devtools_tracing_start_api:
params['traceConfig'] = (
chrome_trace_config.GetChromeTraceConfigForDevTools())
else:
if chrome_trace_config.requires_modern_devtools_tracing_start_api:
raise TracingUnsupportedException(
'Trace options require modern Tracing.start DevTools API, '
'which is NOT supported by the browser')
params['categories'], params['options'] = (
chrome_trace_config.GetChromeTraceCategoriesAndOptionsForDevTools())
req = {'method': 'Tracing.start', 'params': params}
logging.info('Start Tracing Request: %r', req)
response = self._inspector_websocket.SyncRequest(req, timeout)
if 'error' in response:
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.start:\n' + json.dumps(response, indent=2))
self._is_tracing_running = True
self._start_issued = True
return True
def RecordClockSyncMarker(self, sync_id):
assert self.is_tracing_running, 'Tracing must be running to clock sync.'
req = {
'method': 'Tracing.recordClockSyncMarker',
'params': {
'syncId': sync_id
}
}
rc = self._inspector_websocket.SyncRequest(req, timeout=2)
if 'error' in rc:
raise ClockSyncResponseException(rc['error']['message'])
def StopTracing(self):
"""Stops tracing and pushes results to the supplied TraceDataBuilder.
If this is called after tracing has been stopped, trace data from the last
tracing run is pushed.
"""
if not self.is_tracing_running:
raise TracingHasNotRunException()
else:
if not self._start_issued:
# Tracing is running but start was not issued so, startup tracing must
# be in effect. Issue another Tracing.start to update the transfer mode.
# TODO(caseq): get rid of it when streaming is the default.
params = {'transferMode': 'ReturnAsStream', 'traceConfig': {}}
req = {'method': 'Tracing.start', 'params': params}
self._inspector_websocket.SendAndIgnoreResponse(req)
req = {'method': 'Tracing.end'}
self._inspector_websocket.SendAndIgnoreResponse(req)
self._is_tracing_running = False
self._start_issued = False
self._can_collect_data = True
def DumpMemory(self, timeout=None):
"""Dumps memory.
Args:
timeout: If not specified defaults to 20 minutes.
Returns:
GUID of the generated dump if successful, None otherwise.
Raises:
TracingTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
TracingUnrecoverableException: If there is a websocket error.
TracingUnexpectedResponseException: If the response contains an error
or does not contain the expected result.
"""
request = {'method': 'Tracing.requestMemoryDump'}
if timeout is None:
timeout = 1200 # 20 minutes.
try:
response = self._inspector_websocket.SyncRequest(request, timeout)
except inspector_websocket.WebSocketException as err:
if issubclass(
err.websocket_error_type, websocket.WebSocketTimeoutException):
raise TracingTimeoutException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
else:
raise TracingUnrecoverableException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
except (socket.error,
inspector_websocket.WebSocketDisconnected):
raise TracingUnrecoverableException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
if ('error' in response or
'result' not in response or
'success' not in response['result'] or
'dumpGuid' not in response['result']):
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.requestMemoryDump:\n' + json.dumps(response, indent=2))
result = response['result']
return result['dumpGuid'] if result['success'] else None
def CollectTraceData(self, trace_data_builder, timeout=60):
if not self._can_collect_data:
raise Exception('Cannot collect before tracing is finished.')
self._CollectTracingData(trace_data_builder, timeout)
self._can_collect_data = False
def _CollectTracingData(self, trace_data_builder, timeout):
"""Collects tracing data. Assumes that Tracing.end has already been sent.
Args:
trace_data_builder: An instance of TraceDataBuilder to put results into.
timeout: The timeout in seconds.
Raises:
TracingTimeoutException: If more than |timeout| seconds have passed
since the last time any data was received.
TracingUnrecoverableException: If there is a websocket error.
"""
self._has_received_all_tracing_data = False
start_time = time.time()
self._trace_data_builder = trace_data_builder
try:
while True:
try:
self._inspector_websocket.DispatchNotifications(timeout)
start_time = time.time()
except inspector_websocket.WebSocketException as err:
if not issubclass(
err.websocket_error_type, websocket.WebSocketTimeoutException):
raise TracingUnrecoverableException(
'Exception raised while collecting tracing data:\n' +
traceback.format_exc())
except socket.error:
raise TracingUnrecoverableException(
'Exception raised while collecting tracing data:\n' +
traceback.format_exc())
if self._has_received_all_tracing_data:
break
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
raise TracingTimeoutException(
'Only received partial trace data due to timeout after %s '
'seconds. If the trace data is big, you may want to increase '
'the timeout amount.' % elapsed_time)
finally:
self._trace_data_builder = None
def _NotificationHandler(self, res):
if res.get('method') == 'Tracing.dataCollected':
value = res.get('params', {}).get('value')
self._trace_data_builder.AddTraceFor(trace_data_module.CHROME_TRACE_PART,
value)
elif res.get('method') == 'Tracing.tracingComplete':
stream_handle = res.get('params', {}).get('stream')
if not stream_handle:
self._has_received_all_tracing_data = True
return
reader = _DevToolsStreamReader(self._inspector_websocket, stream_handle)
reader.Read(self._ReceivedAllTraceDataFromStream)
def _ReceivedAllTraceDataFromStream(self, trace_handle):
self._trace_data_builder.AddTraceFor(
trace_data_module.CHROME_TRACE_PART, trace_handle)
self._has_received_all_tracing_data = True
def Close(self):
self._inspector_websocket.UnregisterDomain(self._TRACING_DOMAIN)
self._inspector_websocket = None
@decorators.Cache
def IsTracingSupported(self):
req = {'method': 'Tracing.hasCompleted'}
res = self._inspector_websocket.SyncRequest(req, timeout=10)
return not res.get('response')
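
# --- Illustrative flow (not part of the original file). Assuming `backend` is an instance of this
# tracing-backend class and `config` / `builder` come from telemetry's chrome trace config and
# trace data modules, a typical session would look like:
#
#   backend.StartTracing(config)        # returns True when tracing actually starts
#   # ... run the workload being profiled ...
#   backend.StopTracing()               # issues Tracing.end; data becomes collectable
#   backend.CollectTraceData(builder)   # pushes the trace into the TraceDataBuilder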
|
the-stack_106_21387
|
#!/usr/bin/env python
# coding: utf-8
# # [Memanggil Library Pandas](https://academy.dqlab.id/main/livecode/178/346/1682)
# In[1]:
import pandas as pd
import numpy as np
# # [DataFrame & Series](https://academy.dqlab.id/main/livecode/178/346/1683)
# In[2]:
import pandas as pd
# Series
number_list = pd.Series([1, 2, 3, 4, 5, 6])
print("Series:")
print(number_list)
# DataFrame
matrix = [[1, 2, 3],
['a','b','c'],
[3, 4, 5],
['d',4,6]]
matrix_list = pd.DataFrame(matrix)
print("DataFrame:")
print(matrix_list)
# # [Atribut DataFrame & Series - Part 1](https://academy.dqlab.id/main/livecode/178/346/1684)
# In[3]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [1] attribute .info()
print("[1] attribute .info()")
print(matrix_list.info())
# [2] attribute .shape
print("\n[2] attribute .shape")
print(" Shape dari number_list:", number_list.shape)
print(" Shape dari matrix_list:", matrix_list.shape)
# [3] attribute .dtypes
print("\n[3] attribute .dtypes")
print(" Tipe data number_list:", number_list.dtypes)
print(" Tipe data matrix_list:", matrix_list.dtypes)
# [4] attribute .astype()
print("\n[4] attribute .astype()")
print(" Konversi number_list ke str:", number_list.astype("str"))
print(" Konversi matrix_list ke str:", matrix_list.astype("str"))
# # [Atribut DataFrame & Series - Part 2](https://academy.dqlab.id/main/livecode/178/346/1685)
# In[4]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [5] attribute .copy()
print("[5] attribute .copy()")
num_list = number_list.copy()
print(" Copy number_list ke num_list:", num_list)
mtr_list = matrix_list.copy()
print(" Copy matrix_list ke mtr_list:", mtr_list)
# [6] attribute .to_list()
print("[6] attribute .to_list()")
print(number_list.to_list())
# [7] attribute .unique()
print("[7] attribute .unique()")
print(number_list.unique())
# # [Atribut DataFrame & Series - Part 3](https://academy.dqlab.id/main/livecode/178/346/1686)
# In[5]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [8] attribute .index
print("[8] attribute .index")
print(" Index number_list:", number_list.index)
print(" Index matrix_list:", matrix_list.index)
# [9] attribute .columns
print("[9] attribute .columns")
print(" Column matrix_list:", matrix_list.columns)
# [10] attribute .loc
print("[10] attribute .loc")
print(" .loc[0:1] pada number_list:", number_list.loc[0:1])
print(" .loc[0:1] pada matrix_list:", matrix_list.loc[0:1])
# [11] attribute .iloc
print("[11] attribute .iloc")
print(" iloc[0:1] pada number_list:", number_list.iloc[0:1])
print(" iloc[0:1] pada matrix_list:", matrix_list.iloc[0:1])
# # [Creating Series & Dataframe from List](https://academy.dqlab.id/main/livecode/178/346/1688)
# In[6]:
import pandas as pd
# Creating series from list
ex_list = ['a',1,3,5,'c','d']
ex_series = pd.Series(ex_list)
print(ex_series)
# Creating dataframe from list of list
ex_list_of_list = [[1, 'a', 'b', 'c'],
[2.5, 'd', 'e', 'f'],
[5, 'g', 'h', 'i'],
[7.5, 'j', 10.5, 'l']]
index = ['dq', 'lab', 'kar', 'lan']
cols = ['float', 'char', 'obj', 'char']
ex_df = pd.DataFrame(ex_list_of_list, index=index, columns=cols)
print(ex_df)
# # [Creating Series & Dataframe from Dictionary](https://academy.dqlab.id/main/livecode/178/346/1689)
# In[7]:
import pandas as pd
# Creating series from dictionary
dict_series = {'1':'a',
'2':'b',
'3':'c'}
ex_series = pd.Series(dict_series)
print(ex_series)
# Creating dataframe from dictionary
df_series = {'1':['a','b','c'],
'2':['b','c','d'],
'4':[2,3,'z']}
ex_df = pd.DataFrame(df_series)
print(ex_df)
# # [Creating Series & Dataframe from Numpy Array](https://academy.dqlab.id/main/livecode/178/346/1690)
# In[9]:
# import pandas as pd
import numpy as np
# Creating series from numpy array (1D)
arr_series = np.array([1,2,3,4,5,6,6,7])
ex_series = pd.Series(arr_series)
print(ex_series)
# Creating dataframe from numpy array (2D)
arr_df = np.array([[1, 2, 3, 5],
[5, 6, 7, 8],
['a','b','c',10]])
ex_df = pd.DataFrame(arr_df)
print(ex_df)
# # [Read Dataset - CSV dan TSV](https://academy.dqlab.id/main/livecode/178/347/1694)
# In[10]:
import pandas as pd
# CSV file
df_csv = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
print(df_csv.head(3)) # Show the top 3 rows
# TSV file
df_tsv = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep='\t')
print(df_tsv.head(3)) # Show the top 3 rows
# # [Read Dataset - Excel](https://academy.dqlab.id/main/livecode/178/347/1695)
# In[11]:
import pandas as pd
# xlsx file with the data in the "test" sheet
df_excel = pd.read_excel("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_excel.xlsx", sheet_name="test")
print(df_excel.head(4)) # Show the top 4 rows
# # [Read Dataset - JSON](https://academy.dqlab.id/main/livecode/178/347/1698)
# In[13]:
import pandas as pd
# JSON file
url = "https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/covid2019-api-herokuapp-v2.json"
df_json = pd.read_json(url)
print(df_json.head(10)) # Show the top 10 rows
# # [Head & Tail](https://academy.dqlab.id/main/livecode/178/347/2143)
# In[14]:
import pandas as pd
# Read sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Show the top 3 rows
print("Tiga data teratas:\n", df.head(3))
# Show the bottom 3 rows
print("Tiga data terbawah:\n", df.tail(3))
# # [Indexing - Part 2](https://academy.dqlab.id/main/livecode/178/429/2133)
# In[15]:
import pandas as pd
# Read the TSV file sample_tsv.tsv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t")
# Index of df
print("Index:", df.index)
# Columns of df
print("Columns:", df.columns)
# # [Indexing - Part 3](https://academy.dqlab.id/main/livecode/178/429/2134)
# In[16]:
import pandas as pd
# Read the TSV file sample_tsv.tsv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t")
# Set a multi-index on df
df_x = df.set_index(['order_date', 'city', 'customer_id'])
# Print the names and levels of the multi-index
for name, level in zip(df_x.index.names, df_x.index.levels):
    print(name, ':', level)
# # [Indexing - Part 4](https://academy.dqlab.id/main/livecode/178/429/2135)
# In[17]:
import pandas as pd
# Read only the first 10 rows of sample_tsv.tsv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t", nrows=10)
# Print the original data frame
print("Dataframe awal:\n", df)
# Set a new index
df.index = ["Pesanan ke-" + str(i) for i in range(1, 11)]
# Print the data frame with the new index
print("Dataframe dengan index baru:\n", df)
# # [Indexing - Part 5](https://academy.dqlab.id/main/livecode/178/429/2138)
# In[18]:
import pandas as pd
# Read sample_tsv.tsv and set index_col as instructed
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t", index_col=["order_date","order_id"])
# Print the top 8 rows of the data frame
print("Dataframe:\n", df.head(8))
# # [Slicing - Part 1](https://academy.dqlab.id/main/livecode/178/429/2136)
# In[19]:
import pandas as pd
# Read sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Slice directly by column values
df_slice = df.loc[(df["customer_id"] == "18055") &
                  (df["product_id"].isin(["P0029","P0040","P0041","P0116","P0117"]))]
print("Slice langsung berdasarkan kolom:\n", df_slice)
# # [Slicing - Part 2](https://academy.dqlab.id/main/livecode/178/429/2139)
# In[20]:
import pandas as pd
# Read sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Set the index of df as instructed
df = df.set_index(["order_date","order_id","product_id"])
# Slice as instructed
df_slice = df.loc[("2019-01-01",1612339,["P2154","P2159"]),:]
print("Slice df:\n", df_slice)
# # [Transforming - Part 1](https://academy.dqlab.id/main/livecode/178/429/2142)
# In[21]:
import pandas as pd
# Read sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Show the data types
print("Tipe data df:\n", df.dtypes)
# Convert the order_date column to datetime
df["order_date"] = pd.to_datetime(df["order_date"])
# Show the data types of df after the transformation
print("\nTipe data df setelah transformasi:\n", df.dtypes)
# # [Transforming - Part 2](https://academy.dqlab.id/main/livecode/178/429/2144)
# In[22]:
import pandas as pd
# Read sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Show the data types
print("Tipe data df:\n", df.dtypes)
# Convert the quantity column to a float numeric type
df["quantity"] = pd.to_numeric(df["quantity"], downcast="float")
# Convert the city column to the category type
df["city"] = df["city"].astype("category")
# Show the data types of df after the transformation
print("\nTipe data df setelah transformasi:\n", df.dtypes)
# # [Transforming - Part 3](https://academy.dqlab.id/main/livecode/178/429/2145)
# In[23]:
import pandas as pd
# Read sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Print the top 5 rows of the brand column
print("Kolom brand awal:\n", df["brand"].head())
# Use the apply method to lower-case the column values
df["brand"] = df["brand"].apply(lambda x: x.lower())
# Print the top 5 rows of the brand column
print("Kolom brand setelah apply:\n", df["brand"].head())
# Use the map method to take the brand code, i.e. its last character
df["brand"] = df["brand"].map(lambda x: x[-1])
# Print the top 5 rows of the brand column
print("Kolom brand setelah map:\n", df["brand"].head())
# # [Transforming - Part 4](https://academy.dqlab.id/main/livecode/178/429/2146)
# In[24]:
import numpy as np
import pandas as pd
# Random number generator: fix the seed to any number so the random results are the same on every run
np.random.seed(1234)
# Create a dataframe of 3 rows and 4 columns with random numbers
df_tr = pd.DataFrame(np.random.rand(3,4))
# Print the dataframe
print("Dataframe:\n", df_tr)
# Way 1: without defining a function first, using an anonymous lambda directly
df_tr1 = df_tr.applymap(lambda x: x**2 + 3*x + 2)
print("\nDataframe - cara 1:\n", df_tr1)
# Way 2: with a named function
def quadratic_fun(x):
    return x**2 + 3*x + 2
df_tr2 = df_tr.applymap(quadratic_fun)
print("\nDataframe - cara 2:\n", df_tr2)
# # [Inspeksi Missing Value](https://academy.dqlab.id/main/livecode/178/430/2148)
# In[25]:
import pandas as pd
# Baca file "public data covid19 jhu csse eu.csv"
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/CHAPTER+4+-+missing+value+-+public+data+covid19+.csv")
# Cetak info dari df
print(df.info())
# Cetak jumlah missing value di setiap kolom
mv = df.isna().sum()
print("\nJumlah missing value per kolom:\n", mv)
# # [Treatment untuk Missing Value - Part 2](https://academy.dqlab.id/main/livecode/178/430/2150)
# In[26]:
import pandas as pd
# Baca file "public data covid19 jhu csse eu.csv"
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/CHAPTER+4+-+missing+value+-+public+data+covid19+.csv")
# Cetak ukuran awal dataframe
print("Ukuran awal df: %d baris, %d kolom." % df.shape)
# Drop kolom yang seluruhnya missing value dan cetak ukurannya
df = df.dropna(axis=1, how="all")
print("Ukuran df setelah buang kolom dengan seluruh data missing: %d baris, %d kolom." % df.shape)
# Drop baris jika ada satu saja data yang missing dan cetak ukurannya
df = df.dropna(axis=0, how="any")
print("Ukuran df setelah dibuang baris yang memiliki sekurangnya 1 missing value: %d baris, %d kolom." % df.shape)
# # [Treatment untuk Missing Value - Part 3](https://academy.dqlab.id/main/livecode/178/430/2152)
# In[27]:
import pandas as pd
# Baca file "public data covid19 jhu csse eu.csv"
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/CHAPTER+4+-+missing+value+-+public+data+covid19+.csv")
# Cetak unique value pada kolom province_state
print("Unique value awal:\n", df["province_state"].unique())
# Ganti missing value dengan string "unknown_province_state"
df["province_state"] = df["province_state"].fillna("unknown_province_state")
# Cetak kembali unique value pada kolom province_state
print("Unique value setelah fillna:\n", df["province_state"].unique())
# # [Treatment untuk Missing Value - Part 4](https://academy.dqlab.id/main/livecode/178/430/2151)
# In[28]:
import pandas as pd
# Baca file "https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/CHAPTER+4+-+missing+value+-+public+data+covid19+.csv"
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/CHAPTER+4+-+missing+value+-+public+data+covid19+.csv")
# Cetak nilai mean dan median awal
print("Awal: mean = %f, median = %f." % (df["active"].mean(), df["active"].median()))
# Isi missing value kolom active dengan median
df_median = df["active"].fillna(df["active"].median())
# Cetak nilai mean dan median awal setelah diisi dengan median
print("Fillna median: mean = %f, median = %f." % (df_median.mean(), df_median.median()))
# Isi missing value kolom active dengan mean
df_mean = df["active"].fillna(df["active"].mean())
# Cetak nilai mean dan median awal setelah diisi dengan mean
print("Fillna mean: mean = %f, median = %f." % (df_mean.mean(), df_mean.median()))
# # [Treatment untuk Missing Value - Part 5](https://academy.dqlab.id/main/livecode/178/430/2155)
# In[29]:
import numpy as np
import pandas as pd
# Data
ts = pd.Series({
"2020-01-01":9,
"2020-01-02":np.nan,
"2020-01-05":np.nan,
"2020-01-07":24,
"2020-01-10":np.nan,
"2020-01-12":np.nan,
"2020-01-15":33,
"2020-01-17":np.nan,
"2020-01-16":40,
"2020-01-20":45,
"2020-01-22":52,
"2020-01-25":75,
"2020-01-28":np.nan,
"2020-01-30":np.nan
})
# Fill the missing values using linear interpolation
ts = ts.interpolate()
# Print the time series after linear interpolation
print("Setelah diisi missing valuenya:\n", ts)
# # [Project dari Andra](https://academy.dqlab.id/main/livecode/178/431/2156)
# In[30]:
import pandas as pd
# 1. Read the dataset
print("[1] BACA DATASET")
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/retail_raw_test.csv", low_memory=False)
print(" Dataset:\n", df.head())
print(" Info:\n", df.info())
# 2. Change the data types
print("\n[2] UBAH TIPE DATA")
df["customer_id"] = df["customer_id"].apply(lambda x: x.split("'")[1]).astype("int64")
df["quantity"] = df["quantity"].apply(lambda x: x.split("'")[1]).astype("int64")
df["item_price"] = df["item_price"].apply(lambda x: x.split("'")[1]).astype("int64")
print(" Tipe data:\n", df.dtypes)
# 3. Transform "product_value" into the uniform format "PXXXX", assign it to a new "product_id" column, drop the "product_value" column, and replace any NaN with "unknown"
print("\n[3] TRANSFORM product_value MENJADI product_id")
# Define the helper function
import math
def impute_product_value(val):
    if math.isnan(val):
        return "unknown"
    else:
        return 'P' + '{:0>4}'.format(str(val).split('.')[0])
# Create the "product_id" column
df["product_id"] = df["product_value"].apply(lambda x: impute_product_value(x))
# Drop the "product_value" column
df.drop(["product_value"], axis=1, inplace=True)
# Print the top 5 rows
print(df.head())
# 4. Transform order_date into values with the "YYYY-mm-dd" format
print("\n[4] TRANSFORM order_date MENJADI FORMAT YYYY-mm-dd")
months_dict = {
"Jan":"01",
"Feb":"02",
"Mar":"03",
"Apr":"04",
"May":"05",
"Jun":"06",
"Jul":"07",
"Aug":"08",
"Sep":"09",
"Oct":"10",
"Nov":"11",
"Dec":"12"
}
df["order_date"] = pd.to_datetime(df["order_date"].apply(lambda x: str(x)[-4:] + "-" + months_dict[str(x)[:3]] + "-" + str(x)[4:7]))
print(" Tipe data:\n", df.dtypes)
# 5. Handle the missing data in several columns
print("\n[5] HANDLING MISSING VALUE")
# The "city" and "province" columns still have missing values; fill the missing values in both columns with "unknown"
df[["city","province"]] = df[["city","province"]].fillna("unknown")
# The brand column also still has missing values; replace the NaN values with "no_brand"
df["brand"] = df["brand"].fillna("no_brand")
# Check whether any column still contains missing values
print(" Info:\n", df.info())
# 6. Create a new "city/province" column by combining the "city" and "province" columns, then delete the source columns
print("\n[6] MEMBUAT KOLOM BARU city/province")
df["city/province"] = df["city"] + "/" + df["province"]
# Drop the "city" and "province" columns since they have been combined
df.drop(["city","province"], axis=1, inplace=True)
# Print the top 5 rows
print(df.head())
# 7. Create a hierarchical index consisting of the "city/province", "order_date", "customer_id", "order_id", "product_id" columns
print("\n[7] MEMBUAT HIERACHICAL INDEX")
df = df.set_index(["city/province","order_date","customer_id","order_id","product_id"])
# Sort by the new index
df = df.sort_index()
# Print the top 5 rows
print(df.head())
# 8. Membuat kolom "total_price" yang formula nya perkalian antara kolom "quantity" dan kolom "item_price"
print("\n[8] MEMBUAT KOLOM total_price")
df["total_price"] = df["quantity"] * df["item_price"]
# Cetak 5 data teratas
print(df.head())
# 9. Slice the dataset so that it only contains data from January 2019
print("\n[9] SLICE DATASET UNTUK BULAN JANUARI 2019 SAJA")
idx = pd.IndexSlice
df_jan2019 = df.loc[idx[:, "2019-01-01":"2019-01-31"],:]
print("Dataset akhir:\n", df_jan2019)
# END OF PROJECT
|
the-stack_106_21388
|
n = int(input('Enter the number of Fibonacci terms: '))
l1 = [0, 1][:n]  # seed values, trimmed so n = 0 or n = 1 also works
a = 0
b = 1
for x in range(n - 2):  # two terms are already in the list
    c = a + b
    a = b
    b = c
    l1.append(c)
print(l1)
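
# An alternative sketch (not in the original script): the same series built by a reusable helper.
# It assumes the same 0, 1 seed values used above.
def fibonacci(count):
    series = [0, 1][:max(count, 0)]
    while len(series) < count:
        series.append(series[-1] + series[-2])
    return series

# Example: fibonacci(7) -> [0, 1, 1, 2, 3, 5, 8]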
|
the-stack_106_21392
|
from __future__ import absolute_import
from collections import OrderedDict
from .layers import *
from .roi import *
def profiling(net, input=None):
# input is either a Blob with the shape of (batch,h,w,c) or a dict of them
layers=[]
if isinstance(input,dict):
blob_dict = OrderedDict(input)
not_ref = [input[k] for k in input]
else:
input_name='data'
if hasattr(net.net,'input'):
input_name=net.net.input[0]
blob_dict = OrderedDict({input_name: input})
not_ref=[input]
for i, layer in enumerate(net.net.layer):
out = None
if len(layer.top) == 1 and len(layer.bottom) == 1:
if layer.type == 'Convolution':
param = layer.convolution_param
out = Conv(blob_dict[layer.bottom[0]], param.kernel_size, param.num_output, param.stride,
param.pad, None, layer.name, group_size=param.group)
if layer.type == 'InnerProduct':
param=layer.inner_product_param
out= fc(blob_dict[layer.bottom[0]],param.num_output,None,layer.name)
if layer.type == 'ReLU':
out = Activation(blob_dict[layer.bottom[0]], 'relu', layer.name)
if layer.type == 'PReLU':
out = Activation(blob_dict[layer.bottom[0]], 'prelu', layer.name)
if layer.type == 'Pooling':
param = layer.pooling_param
out = Pool(blob_dict[layer.bottom[0]], param.kernel_size, param.stride,
param.pad, layer.name,param.pool,ceil=True)
if layer.type == 'Normalize':
out = Norm(blob_dict[layer.bottom[0]], 'norm', layer.name)
if layer.type == 'BatchNorm':
out= Norm(blob_dict[layer.bottom[0]],'batch_norm',layer.name)
if layer.type== 'LRN':
out= Norm(blob_dict[layer.bottom[0]],'lrn',layer.name)
if layer.type == 'Permute':
shape=[blob_dict[layer.bottom[0]][dim-1] for dim in layer.permute_param.order[1:]]
out = Permute(blob_dict[layer.bottom[0]],shape,layer.name)
if layer.type == 'Flatten':
out = Flatten(blob_dict[layer.bottom[0]], layer.name)
if layer.type == 'Scale':
out =Scale (blob_dict[layer.bottom[0]], name = layer.name)
if layer.type == 'Softmax':
out =Softmax (blob_dict[layer.bottom[0]], name = layer.name)
if layer.type == 'Dropout':
out =Dropout (blob_dict[layer.bottom[0]], name = layer.name)
if layer.type == 'Reshape':
out =Reshape (blob_dict[layer.bottom[0]],shape=layer.reshape_param.shape.dim, name = layer.name)
if out:
try:
not_ref.remove(blob_dict[layer.bottom[0]])
except:
pass
blob_dict[layer.top[0]] = out()
not_ref.append(blob_dict[layer.top[0]])
layers.append(out)
else:
assert False, 'layer type: %s cannot be profiled' % layer.type
elif len(layer.bottom)>1:
# for multi input layer
if layer.type=='Eltwise':
param=layer.eltwise_param
out = Eltwise([blob_dict[bottom] for bottom in layer.bottom],
type=param.EltwiseOp.Name(param.operation),name=layer.name)
if layer.type=='PSROIPooling':
param=layer.psroi_pooling_param
out = PSROIPool(blob_dict[layer.bottom[0]],blob_dict[layer.bottom[1]],
param.output_dim,param.group_size)
if layer.type=='ROIPooling':
param=layer.roi_pooling_param
out = ROIPool(blob_dict[layer.bottom[0]],blob_dict[layer.bottom[1]],
param.pooled_w,param.pooled_h,layer.name)
if layer.type == "Concat":
param = layer.concat_param
out = Concat([blob_dict[bottom] for bottom in layer.bottom],param.axis,layer.name)
if out:
for bottom in layer.bottom:
try:
not_ref.remove(blob_dict[bottom])
except:
pass
blob_dict[layer.top[0]] = out()
not_ref.append(blob_dict[layer.top[0]])
layers.append(out)
else:
assert False, 'layer type: %s cannot be profiled' % layer.type
elif len(layer.top)>1:
if layer.type == 'Slice':
param=layer.slice_param
out =Slice (blob_dict[layer.bottom[0]], name = layer.name,slice_point=param.slice_point,axis=param.axis)
if out:
try:
not_ref.remove(blob_dict[layer.bottom[0]])
except:
pass
for o,top in zip(out(),layer.top):
blob_dict[top] = o
not_ref.append(blob_dict[top])
layers.append(out)
return blob_dict,layers
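
# --- Illustrative usage (not part of the original module). The network wrapper and Blob below
# are hypothetical placeholders; only profiling() itself is defined here.
#
#   net = CaffeNet('deploy.prototxt')         # hypothetical wrapper exposing .net.layer / .net.input
#   data = Blob((1, 224, 224, 3))             # hypothetical input blob with (batch, h, w, c)
#   blob_dict, layers = profiling(net, data)  # output shapes are propagated layer by layer
#   for layer in layers:
#       print(layer.name)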
|
the-stack_106_21394
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from nemo.collections.asr.modules import rnnt_abstract
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.collections.common.parts import rnn
from nemo.core.classes import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import (
AcousticEncodedRepresentation,
ElementType,
EmbeddedTextType,
LabelsType,
LengthsType,
LogprobsType,
LossType,
NeuralType,
)
from nemo.utils import logging
class RNNTDecoder(rnnt_abstract.AbstractRNNTDecoder, Exportable):
"""A Recurrent Neural Network Transducer Decoder / Prediction Network (RNN-T Prediction Network).
An RNN-T Decoder/Prediction network, comprised of a stateful LSTM model.
Args:
prednet: A dict-like object which contains the following key-value pairs.
pred_hidden: int specifying the hidden dimension of the prediction net.
pred_rnn_layers: int specifying the number of rnn layers.
Optionally, it may also contain the following:
forget_gate_bias: float, set by default to 1.0, which constructs a forget gate
initialized to 1.0.
Reference:
[An Empirical Exploration of Recurrent Network Architectures](http://proceedings.mlr.press/v37/jozefowicz15.pdf)
t_max: int value, set to None by default. If an int is specified, performs Chrono Initialization
of the LSTM network, based on the maximum number of timesteps `t_max` expected during the course
of training.
Reference:
[Can recurrent neural networks warp time?](https://openreview.net/forum?id=SJcKhk-Ab)
weights_init_scale: Float scale of the weights after initialization. Setting to lower than one
sometimes helps reduce variance between runs.
hidden_hidden_bias_scale: Float scale for the hidden-to-hidden bias scale. Set to 0.0 for
the default behaviour.
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the final LSTM RNN layer.
vocab_size: int, specifying the vocabulary size of the embedding layer of the Prediction network,
excluding the RNNT blank token.
normalization_mode: Can be either None, 'batch' or 'layer'. By default, is set to None.
Defines the type of normalization applied to the RNN layer.
random_state_sampling: bool, set to False by default. When set, provides normal-distribution
sampled state tensors instead of zero tensors during training.
Reference:
[Recognizing long-form speech using streaming end-to-end models](https://arxiv.org/abs/1910.11455)
blank_as_pad: bool, set to True by default. When set, will add a token to the Embedding layer of this
prediction network, and will treat this token as a pad token. In essence, the RNNT blank token will
be treated as a pad token, and the embedding layer will return a zero tensor for this token.
It is set by default as it enables various batch optimizations required for batched beam search.
Therefore, it is not recommended to disable this flag.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"targets": NeuralType(('B', 'T'), LabelsType()),
"target_length": NeuralType(tuple('B'), LengthsType()),
"states": [NeuralType(('D', 'B', 'D'), ElementType(), optional=True)], # must always be last
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {
"outputs": NeuralType(('B', 'D', 'T'), EmbeddedTextType()),
"prednet_lengths": NeuralType(tuple('B'), LengthsType()),
"states": [NeuralType((('D', 'B', 'D')), ElementType(), optional=True)], # must always be last
}
def _prepare_for_export(self, **kwargs):
self.freeze()
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
length = 1
targets = torch.full(fill_value=self.blank_idx, size=(1, length), dtype=torch.int32).to(
next(self.parameters()).device
)
target_length = torch.randint(0, length, size=(1,), dtype=torch.int32).to(next(self.parameters()).device)
states = tuple(self.initialize_state(targets.float()))
return (targets, target_length, states)
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set([])
@property
def disabled_deployment_output_names(self):
"""Implement this method to return a set of output names disabled for export"""
return set([])
def __init__(
self,
prednet: Dict[str, Any],
vocab_size: int,
normalization_mode: Optional[str] = None,
random_state_sampling: bool = False,
blank_as_pad: bool = True,
):
# Required arguments
self.pred_hidden = prednet['pred_hidden']
self.pred_rnn_layers = prednet["pred_rnn_layers"]
self.blank_idx = vocab_size
# Initialize the model (blank token increases vocab size by 1)
super().__init__(vocab_size=vocab_size, blank_idx=self.blank_idx, blank_as_pad=blank_as_pad)
# Optional arguments
forget_gate_bias = prednet.get('forget_gate_bias', 1.0)
t_max = prednet.get('t_max', None)
weights_init_scale = prednet.get('weights_init_scale', 1.0)
hidden_hidden_bias_scale = prednet.get('hidden_hidden_bias_scale', 0.0)
dropout = prednet.get('dropout', 0.0)
self.random_state_sampling = random_state_sampling
self.prediction = self._predict_modules(
vocab_size=vocab_size, # the blank token is appended inside _predict_modules when blank_as_pad is set
pred_n_hidden=self.pred_hidden,
pred_rnn_layers=self.pred_rnn_layers,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
norm=normalization_mode,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
dropout=dropout,
)
# Flag needed for RNNT export support
self._rnnt_export = False
@typecheck()
def forward(self, targets, target_length, states=None):
# y: (B, U)
y = rnn.label_collate(targets)
# state maintenance is unnecessary during training forward call
# to get state, use .predict() method.
if self._rnnt_export:
add_sos = False
else:
add_sos = True
g, states = self.predict(y, state=states, add_sos=add_sos) # (B, U, D)
g = g.transpose(1, 2) # (B, D, U)
return g, target_length, states
def predict(
self,
y: Optional[torch.Tensor] = None,
state: Optional[List[torch.Tensor]] = None,
add_sos: bool = True,
batch_size: Optional[int] = None,
) -> (torch.Tensor, List[torch.Tensor]):
"""
Stateful prediction of scores and state for a (possibly null) tokenset.
This method takes various cases into consideration :
- No token, no state - used for priming the RNN
- No token, state provided - used for blank token scoring
- Given token, states - used for scores + new states
Here:
B - batch size
U - label length
H - Hidden dimension size of RNN
L - Number of RNN layers
Args:
y: Optional torch tensor of shape [B, U] of dtype long which will be passed to the Embedding.
If None, creates a zero tensor of shape [B, 1, H] which mimics output of pad-token on Embedding.
state: An optional list of states for the RNN. Eg: For LSTM, it is the state list length is 2.
Each state must be a tensor of shape [L, B, H].
If None, and during training mode and `random_state_sampling` is set, will sample a
normal distribution tensor of the above shape. Otherwise, None will be passed to the RNN.
add_sos: bool flag, whether a zero vector describing a "start of signal" token should be
prepended to the above "y" tensor. When set, output size is (B, U + 1, H).
batch_size: An optional int, specifying the batch size of the `y` tensor.
Can be inferred from `y` or `state` when either is provided, but if both are None then batch_size cannot be None.
Returns:
A tuple (g, hid) such that -
If add_sos is False:
g: (B, U, H)
hid: (h, c) where h is the final sequence hidden state and c is the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
If add_sos is True:
g: (B, U + 1, H)
hid: (h, c) where h is the final sequence hidden state and c is the final cell state:
h (tensor), shape (L, B, H)
c (tensor), shape (L, B, H)
"""
# Get device and dtype of current module
_p = next(self.parameters())
device = _p.device
dtype = _p.dtype
# If y is not None, it is of shape [B, U] with dtype long.
if y is not None:
if y.device != device:
y = y.to(device)
# (B, U) -> (B, U, H)
y = self.prediction["embed"](y)
else:
# Y is not provided, assume zero tensor with shape [B, 1, H] is required
# Emulates output of embedding of pad token.
if batch_size is None:
B = 1 if state is None else state[0].size(1)
else:
B = batch_size
y = torch.zeros((B, 1, self.pred_hidden), device=device, dtype=dtype)
# Prepend blank "start of sequence" symbol (zero tensor)
if add_sos:
B, U, H = y.shape
start = torch.zeros((B, 1, H), device=y.device, dtype=y.dtype)
y = torch.cat([start, y], dim=1).contiguous() # (B, U + 1, H)
else:
start = None # makes del call later easier
# If in training mode, and random_state_sampling is set,
# initialize state to random normal distribution tensor.
if state is None:
if self.random_state_sampling and self.training:
state = self.initialize_state(y)
# Forward step through RNN
y = y.transpose(0, 1) # (U + 1, B, H)
g, hid = self.prediction["dec_rnn"](y, state)
g = g.transpose(0, 1) # (B, U + 1, H)
del y, start, state
return g, hid
def _predict_modules(
self,
vocab_size,
pred_n_hidden,
pred_rnn_layers,
forget_gate_bias,
t_max,
norm,
weights_init_scale,
hidden_hidden_bias_scale,
dropout,
):
"""
Prepare the trainable parameters of the Prediction Network.
Args:
vocab_size: Vocab size (excluding the blank token).
pred_n_hidden: Hidden size of the RNNs.
pred_rnn_layers: Number of RNN layers.
forget_gate_bias: Whether to perform unit forget gate bias.
t_max: Whether to perform Chrono LSTM init.
norm: Type of normalization to perform in RNN.
weights_init_scale: Float scale of the weights after initialization. Setting to lower than one
sometimes helps reduce variance between runs.
hidden_hidden_bias_scale: Float scale for the hidden-to-hidden bias scale. Set to 0.0 for
the default behaviour.
dropout: Whether to apply dropout to RNN.
"""
if self.blank_as_pad:
embed = torch.nn.Embedding(vocab_size + 1, pred_n_hidden, padding_idx=self.blank_idx)
else:
embed = torch.nn.Embedding(vocab_size, pred_n_hidden)
layers = torch.nn.ModuleDict(
{
"embed": embed,
"dec_rnn": rnn.rnn(
input_size=pred_n_hidden,
hidden_size=pred_n_hidden,
num_layers=pred_rnn_layers,
norm=norm,
forget_gate_bias=forget_gate_bias,
t_max=t_max,
dropout=dropout,
weights_init_scale=weights_init_scale,
hidden_hidden_bias_scale=hidden_hidden_bias_scale,
),
}
)
return layers
def initialize_state(self, y: torch.Tensor) -> List[torch.Tensor]:
"""
Initialize the state of the RNN layers, with same dtype and device as input `y`.
Args:
y: A torch.Tensor whose device the generated states will be placed on.
Returns:
List of torch.Tensor, each of shape [L, B, H], where
L = Number of RNN layers
B = Batch size
H = Hidden size of RNN.
"""
batch = y.size(0)
if self.random_state_sampling and self.training:
state = [
torch.randn(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
torch.randn(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
]
else:
state = [
torch.zeros(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
torch.zeros(self.pred_rnn_layers, batch, self.pred_hidden, dtype=y.dtype, device=y.device),
]
return state
def score_hypothesis(
self, hypothesis: rnnt_utils.Hypothesis, cache: Dict[Tuple[int], Any]
) -> (torch.Tensor, List[torch.Tensor], torch.Tensor):
"""
Similar to the predict() method, instead this method scores a Hypothesis during beam search.
Hypothesis is a dataclass representing one hypothesis in a Beam Search.
Args:
hypothesis: Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
Returns:
Returns a tuple (y, states, lm_token) such that:
y is a torch.Tensor of shape [1, 1, H] representing the score of the last token in the Hypothesis.
state is a list of RNN states, each of shape [L, 1, H].
lm_token is the final integer token of the hypothesis.
"""
if hypothesis.dec_state is not None:
device = hypothesis.dec_state[0].device
else:
_p = next(self.parameters())
device = _p.device
# parse "blank" tokens in hypothesis
if len(hypothesis.y_sequence) > 0 and hypothesis.y_sequence[-1] == self.blank_idx:
blank_state = True
else:
blank_state = False
# Convert last token of hypothesis to torch.Tensor
target = torch.full([1, 1], fill_value=hypothesis.y_sequence[-1], device=device, dtype=torch.long)
lm_token = target[:, -1] # [1]
# Convert current hypothesis into a tuple to preserve in cache
sequence = tuple(hypothesis.y_sequence)
if sequence in cache:
y, new_state = cache[sequence]
else:
# Obtain score for target token and new states
if blank_state:
y, new_state = self.predict(None, state=None, add_sos=False, batch_size=1) # [1, 1, H]
else:
y, new_state = self.predict(
target, state=hypothesis.dec_state, add_sos=False, batch_size=1
) # [1, 1, H]
y = y[:, -1:, :] # Extract just last state : [1, 1, H]
cache[sequence] = (y, new_state)
return y, new_state, lm_token
def batch_score_hypothesis(
self, hypotheses: List[rnnt_utils.Hypothesis], cache: Dict[Tuple[int], Any], batch_states: List[torch.Tensor]
) -> (torch.Tensor, List[torch.Tensor], torch.Tensor):
"""
Used for batched beam search algorithms. Similar to score_hypothesis method.
Args:
hypotheses: List of Hypotheses. Refer to rnnt_utils.Hypothesis.
cache: Dict which contains a cache to avoid duplicate computations.
batch_states: List of torch.Tensor which represent the states of the RNN for this batch.
Each state is of shape [L, B, H]
Returns:
Returns a tuple (b_y, b_states, lm_tokens) such that:
b_y is a torch.Tensor of shape [B, 1, H] representing the scores of the last tokens in the Hypotheses.
b_state is a list of list of RNN states, each of shape [L, B, H].
Represented as B x List[states].
lm_token is a list of the final integer tokens of the hypotheses in the batch.
"""
final_batch = len(hypotheses)
if final_batch == 0:
raise ValueError("No hypotheses was provided for the batch!")
_p = next(self.parameters())
device = _p.device
dtype = _p.dtype
tokens = []
process = []
done = [None for _ in range(final_batch)]
# For each hypothesis, cache the last token of the sequence and the current states
for i, hyp in enumerate(hypotheses):
sequence = tuple(hyp.y_sequence)
if sequence in cache:
done[i] = cache[sequence]
else:
tokens.append(hyp.y_sequence[-1])
process.append((sequence, hyp.dec_state))
if process:
batch = len(process)
# convert list of tokens to torch.Tensor, then reshape.
tokens = torch.tensor(tokens, device=device, dtype=torch.long).view(batch, -1)
dec_states = self.initialize_state(tokens.to(dtype=dtype)) # [L, B, H]
dec_states = self.batch_initialize_states(dec_states, [d_state for seq, d_state in process])
y, dec_states = self.predict(
tokens, state=dec_states, add_sos=False, batch_size=batch
) # [B, 1, H], List([L, 1, H])
dec_states = tuple(state.to(dtype=dtype) for state in dec_states)
# Update done states and cache shared by entire batch.
j = 0
for i in range(final_batch):
if done[i] is None:
# Select sample's state from the batch state list
new_state = self.batch_select_state(dec_states, j)
# Cache [1, H] scores of the current y_j, and its corresponding state
done[i] = (y[j], new_state)
cache[process[j][0]] = (y[j], new_state)
j += 1
# Set the incoming batch states with the new states obtained from `done`.
batch_states = self.batch_initialize_states(batch_states, [d_state for y_j, d_state in done])
# Create batch of all output scores
# List[1, 1, H] -> [B, 1, H]
batch_y = torch.stack([y_j for y_j, d_state in done])
# Extract the last tokens from all hypotheses and convert to a tensor
lm_tokens = torch.tensor([h.y_sequence[-1] for h in hypotheses], device=device, dtype=torch.long).view(
final_batch
)
return batch_y, batch_states, lm_tokens
def batch_initialize_states(self, batch_states: List[torch.Tensor], decoder_states: List[List[torch.Tensor]]):
"""
Create batch of decoder states.
Args:
batch_states (list): batch of decoder states
([L x (B, H)], [L x (B, H)])
decoder_states (list of list): list of decoder states
[B x ([L x (1, H)], [L x (1, H)])]
Returns:
batch_states (tuple): batch of decoder states
([L x (B, H)], [L x (B, H)])
"""
# LSTM has 2 states
new_states = [[] for _ in range(len(decoder_states[0]))]
for layer in range(self.pred_rnn_layers):
for state_id in range(len(decoder_states[0])):
# batch_states[state_id][layer] = torch.stack([s[state_id][layer] for s in decoder_states])
new_state_for_layer = torch.stack([s[state_id][layer] for s in decoder_states])
new_states[state_id].append(new_state_for_layer)
for state_id in range(len(decoder_states[0])):
new_states[state_id] = torch.stack([state for state in new_states[state_id]])
return new_states
def batch_select_state(self, batch_states: List[torch.Tensor], idx: int) -> List[List[torch.Tensor]]:
"""Get decoder state from batch of states, for given id.
Args:
batch_states (list): batch of decoder states
([L x (B, H)], [L x (B, H)])
idx (int): index to extract state from batch of states
Returns:
(tuple): decoder states for given id
([L x (1, H)], [L x (1, H)])
"""
state_list = []
for state_id in range(len(batch_states)):
states = [batch_states[state_id][layer][idx] for layer in range(self.pred_rnn_layers)]
state_list.append(states)
return state_list
class RNNTJoint(rnnt_abstract.AbstractRNNTJoint, Exportable):
"""A Recurrent Neural Network Transducer Joint Network (RNN-T Joint Network).
An RNN-T Joint network, comprised of a feedforward model.
Args:
jointnet: A dict-like object which contains the following key-value pairs.
encoder_hidden: int specifying the hidden dimension of the encoder net.
pred_hidden: int specifying the hidden dimension of the prediction net.
joint_hidden: int specifying the hidden dimension of the joint net
activation: Activation function used in the joint step. Can be one of
['relu', 'tanh', 'sigmoid'].
Optionally, it may also contain the following:
dropout: float, set to 0.0 by default. Optional dropout applied at the end of the joint net.
num_classes: int, specifying the vocabulary size that the joint network must predict,
excluding the RNNT blank token.
vocabulary: Optional list of strings/tokens that comprise the vocabulary of the joint network.
Unused and kept only for easy access for character based encoding RNNT models.
log_softmax: Optional bool, set to None by default. If set as None, will compute the log_softmax()
based on the value provided.
preserve_memory: Optional bool, set to False by default. If the model crashes due to the memory
intensive joint step, one might try this flag to empty the tensor cache in pytorch.
Warning: This will make the forward-backward pass much slower than normal.
It also might not fix the OOM if the GPU simply does not have enough memory to compute the joint.
fuse_loss_wer: Optional bool, set to False by default.
Fuses the joint forward, loss forward and
wer forward steps. In doing so, it trades off speed for memory conservation by creating sub-batches
of the provided batch of inputs, and performs Joint forward, loss forward and wer forward (optional),
all on sub-batches, then collates results to be exactly equal to results from the entire batch.
When this flag is set, prior to calling forward, the fields `loss` and `wer` (either one) *must*
be set using the `RNNTJoint.set_loss()` or `RNNTJoint.set_wer()` methods.
Further, when this flag is set, the following argument `fused_batch_size` *must* be provided
as a non negative integer. This value refers to the size of the sub-batch.
When the flag is set, the input and output signature of `forward()` of this method changes.
Input - in addition to `encoder_outputs` (mandatory argument), the following arguments can be provided.
- decoder_outputs (optional). Required if loss computation is required.
- encoder_lengths (required)
- transcripts (optional). Required for wer calculation.
- transcript_lengths (optional). Required for wer calculation.
- compute_wer (bool, default false). Whether to compute WER or not for the fused batch.
Output - instead of the usual `joint` log prob tensor, the following results can be returned.
- loss (optional). Returned if decoder_outputs, transcripts and transcript_lengths are not None.
- wer_numerator + wer_denominator (optional). Returned if transcripts, transcript_lengths are provided
and compute_wer is set.
fused_batch_size: Optional int, required if `fuse_loss_wer` flag is set. Determines the size of the
sub-batches. Should be any value below the actual batch size per GPU.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"encoder_outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"decoder_outputs": NeuralType(('B', 'D', 'T'), EmbeddedTextType()),
"encoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"transcripts": NeuralType(('B', 'T'), LabelsType(), optional=True),
"transcript_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"compute_wer": NeuralType(optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
if not self._fuse_loss_wer:
return {
"outputs": NeuralType(('B', 'T', 'T', 'D'), LogprobsType()),
}
else:
return {
"loss": NeuralType(elements_type=LossType(), optional=True),
"wer": NeuralType(elements_type=ElementType(), optional=True),
"wer_numer": NeuralType(elements_type=ElementType(), optional=True),
"wer_denom": NeuralType(elements_type=ElementType(), optional=True),
}
def _prepare_for_export(self, **kwargs):
self.freeze()
self._fuse_loss_wer = False
self.log_softmax = False
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
B, T, U = 1, 8192, 1
encoder_outputs = torch.randn(B, self.encoder_hidden, T).to(next(self.parameters()).device)
decoder_outputs = torch.randn(B, self.pred_hidden, U).to(next(self.parameters()).device)
return (encoder_outputs, decoder_outputs)
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set(["encoder_lengths", "transcripts", "transcript_lengths", "compute_wer"])
@property
def disabled_deployment_output_names(self):
"""Implement this method to return a set of output names disabled for export"""
return set()
def __init__(
self,
jointnet: Dict[str, Any],
num_classes: int,
vocabulary: Optional[List] = None,
log_softmax: Optional[bool] = None,
preserve_memory: bool = False,
fuse_loss_wer: bool = False,
fused_batch_size: Optional[int] = None,
experimental_fuse_loss_wer: Any = None,
):
super().__init__()
self.vocabulary = vocabulary
self._vocab_size = num_classes
self._num_classes = num_classes + 1 # add 1 for blank symbol
if experimental_fuse_loss_wer is not None:
# TODO: Deprecate in 1.6
logging.warning(
"`experimental_fuse_loss_wer` will be deprecated in NeMo 1.6. Please use `fuse_loss_wer` instead."
)
# Override fuse_loss_wer from deprecated argument
fuse_loss_wer = experimental_fuse_loss_wer
self._fuse_loss_wer = fuse_loss_wer
self._fused_batch_size = fused_batch_size
if fuse_loss_wer and (fused_batch_size is None):
raise ValueError("If `fuse_loss_wer` is set, then `fused_batch_size` cannot be None!")
self._loss = None
self._wer = None
# Log softmax should be applied explicitly only for CPU
self.log_softmax = log_softmax
self.preserve_memory = preserve_memory
if preserve_memory:
logging.warning(
"`preserve_memory` was set for the Joint Model. Please be aware this will severely impact "
"the forward-backward step time. It also might not solve OOM issues if the GPU simply "
"does not have enough memory to compute the joint."
)
# Required arguments
self.encoder_hidden = jointnet['encoder_hidden']
self.pred_hidden = jointnet['pred_hidden']
self.joint_hidden = jointnet['joint_hidden']
self.activation = jointnet['activation']
# Optional arguments
dropout = jointnet.get('dropout', 0.0)
self.pred, self.enc, self.joint_net = self._joint_net_modules(
num_classes=self._num_classes, # add 1 for blank symbol
pred_n_hidden=self.pred_hidden,
enc_n_hidden=self.encoder_hidden,
joint_n_hidden=self.joint_hidden,
activation=self.activation,
dropout=dropout,
)
# Flag needed for RNNT export support
self._rnnt_export = False
@typecheck()
def forward(
self,
encoder_outputs: torch.Tensor,
decoder_outputs: Optional[torch.Tensor],
encoder_lengths: Optional[torch.Tensor] = None,
transcripts: Optional[torch.Tensor] = None,
transcript_lengths: Optional[torch.Tensor] = None,
compute_wer: bool = False,
) -> Union[torch.Tensor, List[Optional[torch.Tensor]]]:
# encoder = (B, D, T)
# decoder = (B, D, U) if passed, else None
encoder_outputs = encoder_outputs.transpose(1, 2) # (B, T, D)
if decoder_outputs is not None:
decoder_outputs = decoder_outputs.transpose(1, 2) # (B, U, D)
if not self._fuse_loss_wer:
if decoder_outputs is None:
raise ValueError(
"decoder_outputs passed is None, and `fuse_loss_wer` is not set. "
"decoder_outputs can only be None for fused step!"
)
out = self.joint(encoder_outputs, decoder_outputs) # [B, T, U, V + 1]
return out
else:
# At least the loss module must be supplied during fused joint
if self._loss is None or self._wer is None:
raise ValueError("`fuse_loss_wer` flag is set, but `loss` and `wer` modules were not provided! ")
# If fused joint step is required, fused batch size is required as well
if self._fused_batch_size is None:
raise ValueError("If `fuse_loss_wer` is set, then `fused_batch_size` cannot be None!")
# When using fused joint step, both encoder and transcript lengths must be provided
if (encoder_lengths is None) or (transcript_lengths is None):
raise ValueError(
"`fuse_loss_wer` is set, therefore encoder and target lengths " "must be provided as well!"
)
losses = []
wer_numer_list = []
wer_denom_list = []
batch_size = int(encoder_outputs.size(0)) # actual batch size
# Iterate over batch using fused_batch_size steps
for batch_idx in range(0, batch_size, self._fused_batch_size):
begin = batch_idx
end = min(begin + self._fused_batch_size, batch_size)
# Extract the sub batch inputs
# sub_enc = encoder_outputs[begin:end, ...]
# sub_transcripts = transcripts[begin:end, ...]
sub_enc = encoder_outputs.narrow(dim=0, start=begin, length=end - begin)
sub_transcripts = transcripts.narrow(dim=0, start=begin, length=end - begin)
sub_enc_lens = encoder_lengths[begin:end]
sub_transcript_lens = transcript_lengths[begin:end]
# Sub transcripts does not need the full padding of the entire batch
# Therefore reduce the decoder time steps to match
max_sub_enc_length = sub_enc_lens.max()
max_sub_transcript_length = sub_transcript_lens.max()
if decoder_outputs is not None:
# Reduce encoder length to preserve computation
# Encoder: [sub-batch, T, D] -> [sub-batch, T', D]; T' < T
if sub_enc.shape[1] != max_sub_enc_length:
sub_enc = sub_enc.narrow(dim=1, start=0, length=max_sub_enc_length)
# sub_dec = decoder_outputs[begin:end, ...] # [sub-batch, U, D]
sub_dec = decoder_outputs.narrow(dim=0, start=begin, length=end - begin) # [sub-batch, U, D]
# Reduce decoder length to preserve computation
# Decoder: [sub-batch, U, D] -> [sub-batch, U', D]; U' < U
if sub_dec.shape[1] != max_sub_transcript_length + 1:
sub_dec = sub_dec.narrow(dim=1, start=0, length=max_sub_transcript_length + 1)
# Perform joint => [sub-batch, T', U', V + 1]
sub_joint = self.joint(sub_enc, sub_dec)
del sub_dec
# Reduce transcript length to correct alignment
# Transcript: [sub-batch, L] -> [sub-batch, L']; L' <= L
if sub_transcripts.shape[1] != max_sub_transcript_length:
sub_transcripts = sub_transcripts.narrow(dim=1, start=0, length=max_sub_transcript_length)
# Compute sub batch loss
# preserve loss reduction type
loss_reduction = self.loss.reduction
# override loss reduction to sum
self.loss.reduction = None
# compute and preserve loss
loss_batch = self.loss(
log_probs=sub_joint,
targets=sub_transcripts,
input_lengths=sub_enc_lens,
target_lengths=sub_transcript_lens,
)
losses.append(loss_batch)
# reset loss reduction type
self.loss.reduction = loss_reduction
else:
losses = None
# Compute WER for sub batch
if compute_wer:
sub_enc = sub_enc.transpose(1, 2) # [B, T, D] -> [B, D, T]
sub_enc = sub_enc.detach()
sub_transcripts = sub_transcripts.detach()
original_log_prediction = self.wer.log_prediction
if batch_idx == 0:
self.wer.log_prediction = True
else:
self.wer.log_prediction = False
# Compute the wer (with logging for just 1st sub-batch)
self.wer.update(sub_enc, sub_enc_lens, sub_transcripts, sub_transcript_lens)
wer, wer_num, wer_denom = self.wer.compute()
self.wer.reset()
wer_numer_list.append(wer_num)
wer_denom_list.append(wer_denom)
# Reset logging default
self.wer.log_prediction = original_log_prediction
else:
wer = None
del sub_enc, sub_transcripts, sub_enc_lens, sub_transcript_lens
# Collect sub batch loss results
if losses is not None:
losses = torch.cat(losses, 0)
losses = losses.mean() # global batch size average
else:
losses = None
# Collect sub batch wer results
if compute_wer:
wer_num = torch.tensor(wer_numer_list, dtype=torch.long)
wer_denom = torch.tensor(wer_denom_list, dtype=torch.long)
wer_num = wer_num.sum() # global sum of correct words/chars
wer_denom = wer_denom.sum() # global sum of all words/chars
else:
wer_num = None
wer_denom = None
return losses, wer, wer_num, wer_denom
def joint(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
"""
Compute the joint step of the network.
Here,
B = Batch size
T = Acoustic model timesteps
U = Target sequence length
H1, H2 = Hidden dimensions of the Encoder / Decoder respectively
H = Hidden dimension of the Joint hidden step.
V = Vocabulary size of the Decoder (excluding the RNNT blank token).
NOTE:
The implementation of this model is slightly modified from the original paper.
The original paper proposes the following steps :
(enc, dec) -> Expand + Concat + Sum [B, T, U, H1+H2] -> Forward through joint hidden [B, T, U, H] -- *1
*1 -> Forward through joint final [B, T, U, V + 1].
We instead split the joint hidden into joint_hidden_enc and joint_hidden_dec and act as follows:
enc -> Forward through joint_hidden_enc -> Expand [B, T, 1, H] -- *1
dec -> Forward through joint_hidden_dec -> Expand [B, 1, U, H] -- *2
(*1, *2) -> Sum [B, T, U, H] -> Forward through joint final [B, T, U, V + 1].
Args:
f: Output of the Encoder model. A torch.Tensor of shape [B, T, H1]
g: Output of the Decoder model. A torch.Tensor of shape [B, U, H2]
Returns:
Logits / log softmaxed tensor of shape (B, T, U, V + 1).
"""
# f = [B, T, H1]
f = self.enc(f)
f.unsqueeze_(dim=2) # (B, T, 1, H)
# g = [B, U, H2]
g = self.pred(g)
g.unsqueeze_(dim=1) # (B, 1, U, H)
inp = f + g # [B, T, U, H]
del f, g
res = self.joint_net(inp) # [B, T, U, V + 1]
del inp
if self.preserve_memory:
torch.cuda.empty_cache()
# If log_softmax is automatic
if self.log_softmax is None:
if not res.is_cuda: # Use log softmax only if on CPU
res = res.log_softmax(dim=-1)
else:
if self.log_softmax:
res = res.log_softmax(dim=-1)
return res
def _joint_net_modules(self, num_classes, pred_n_hidden, enc_n_hidden, joint_n_hidden, activation, dropout):
"""
Prepare the trainable modules of the Joint Network
Args:
num_classes: Number of output classes (vocab size) excluding the RNNT blank token.
pred_n_hidden: Hidden size of the prediction network.
enc_n_hidden: Hidden size of the encoder network.
joint_n_hidden: Hidden size of the joint network.
activation: Activation of the joint. Can be one of [relu, tanh, sigmoid]
dropout: Dropout value to apply to joint.
"""
pred = torch.nn.Linear(pred_n_hidden, joint_n_hidden)
enc = torch.nn.Linear(enc_n_hidden, joint_n_hidden)
# normalize case before validating so values such as 'ReLU' are accepted
activation = activation.lower()
if activation not in ['relu', 'sigmoid', 'tanh']:
raise ValueError("Unsupported activation for joint step - please pass one of [relu, sigmoid, tanh]")
if activation == 'relu':
activation = torch.nn.ReLU(inplace=True)
elif activation == 'sigmoid':
activation = torch.nn.Sigmoid()
elif activation == 'tanh':
activation = torch.nn.Tanh()
layers = (
[activation]
+ ([torch.nn.Dropout(p=dropout)] if dropout else [])
+ [torch.nn.Linear(joint_n_hidden, num_classes)]
)
return pred, enc, torch.nn.Sequential(*layers)
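# Illustrative (hypothetical sizes): pred_n_hidden=320, enc_n_hidden=512, joint_n_hidden=640
# yields pred = Linear(320, 640), enc = Linear(512, 640) and a joint net of
# activation [-> Dropout] -> Linear(640, num_classes).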
@property
def num_classes_with_blank(self):
return self._num_classes
@property
def loss(self):
return self._loss
def set_loss(self, loss):
if not self._fuse_loss_wer:
raise ValueError("Attempting to set loss module even though `fuse_loss_wer` is not set!")
self._loss = loss
@property
def wer(self):
return self._wer
def set_wer(self, wer):
if not self._fuse_loss_wer:
raise ValueError("Attempting to set WER module even though `fuse_loss_wer` is not set!")
self._wer = wer
@property
def fuse_loss_wer(self):
return self._fuse_loss_wer
def set_fuse_loss_wer(self, fuse_loss_wer):
self._fuse_loss_wer = fuse_loss_wer
if self._fuse_loss_wer is False:
self._loss = None
self._wer = None
@property
def fused_batch_size(self):
return self._fused_batch_size
def set_fused_batch_size(self, fused_batch_size):
self._fused_batch_size = fused_batch_size
|
the-stack_106_21396
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='published')
class Customer(models.Model):
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published')
)
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
('I', 'Intersex')
)
title = models.CharField(max_length=250, null=False)
name = models.CharField(max_length=250)
last_name = models.CharField(max_length=250)
gender = models.CharField(max_length=10, choices=GENDER_CHOICES)
created_by = models.ForeignKey(
User, related_name='created_by', editable=False, on_delete=models.PROTECT, default=1)
created = models.DateTimeField(default=timezone.now)
status = models.CharField(
max_length=10, choices=STATUS_CHOICES, default='draft')
objects = models.Manager()
published = PublishedManager()
class Meta:
verbose_name = "Customer"
verbose_name_plural = "Customers"
def __str__(self):
return "{} {}".format(self.name,self.last_name)
|
the-stack_106_21397
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 25 12:16:36 2020
@author: 11627
"""
from torch import nn
import torch
def initialize_weights(*models):
for model in models:
for module in model.modules():
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1)
module.bias.data.zero_()
class conv_block(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True))
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
"""
Up Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.up(x)
return x
class Attention_block(nn.Module):
"""
Attention Block
"""
def __init__(self, F_g, F_l, F_int):
super(Attention_block, self).__init__()
# W_g projects the gating signal g (F_g channels); W_x projects the skip features x (F_l channels)
self.W_g = nn.Sequential(
nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(F_int)
)
self.W_x = nn.Sequential(
nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(F_int)
)
self.psi = nn.Sequential(
nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
nn.BatchNorm2d(1),
nn.Sigmoid()
)
self.relu = nn.ReLU(inplace=True)
def forward(self, g, x):
g1 = self.W_g(g)
x1 = self.W_x(x)
psi = self.relu(g1 + x1)
psi = self.psi(psi)
out = x * psi
return out
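# Note (illustrative): psi is a [B, 1, H, W] gate in (0, 1); multiplying it into the skip
# features x suppresses activations outside the regions highlighted by the gating signal g.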
class AttS_Net(nn.Module):
"""
Attention Unet implementation
Paper: https://arxiv.org/abs/1804.03999
"""
def __init__(self, img_ch=1, num_classes=1):
super(AttS_Net, self).__init__()
n1 = 64
filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(img_ch, filters[0])
self.Conv2 = conv_block(filters[0], filters[1])
self.Conv3 = conv_block(filters[1], filters[2])
self.Conv4 = conv_block(filters[2], filters[3])
self.Conv5 = conv_block(filters[3], filters[4])
self.Up5 = up_conv(filters[4], filters[3])
self.Att5 = Attention_block(F_g=filters[3], F_l=filters[3], F_int=filters[2])
self.Up_conv5 = conv_block(filters[4], filters[3])
self.Up4 = up_conv(filters[3], filters[2])
self.Att4 = Attention_block(F_g=filters[2], F_l=filters[2], F_int=filters[1])
self.Up_conv4 = conv_block(filters[3], filters[2])
self.Up3 = up_conv(filters[2], filters[1])
self.Att3 = Attention_block(F_g=filters[1], F_l=filters[1], F_int=filters[0])
self.Up_conv3 = conv_block(filters[2], filters[1])
self.Up2 = up_conv(filters[1], filters[0])
self.Att2 = Attention_block(F_g=filters[0], F_l=filters[0], F_int=32)
self.Up_conv2 = conv_block(filters[1], filters[0])
self.Conv = nn.Conv2d(filters[0], num_classes, kernel_size=1, stride=1, padding=0)
initialize_weights(self)
#self.active = torch.nn.Sigmoid()
def forward(self, x):
e1 = self.Conv1(x)
e2 = self.Maxpool1(e1)
e2 = self.Conv2(e2)
e3 = self.Maxpool2(e2)
e3 = self.Conv3(e3)
e4 = self.Maxpool3(e3)
e4 = self.Conv4(e4)
e5 = self.Maxpool4(e4)
e5 = self.Conv5(e5)
# print(e5.shape)
d5 = self.Up5(e5)
#print(d5.shape)
x4 = self.Att5(g=d5, x=e4)
d5 = torch.cat((x4, d5), dim=1)
d5 = self.Up_conv5(d5)
d4 = self.Up4(d5)
x3 = self.Att4(g=d4, x=e3)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
x2 = self.Att3(g=d3, x=e2)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
x1 = self.Att2(g=d2, x=e1)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
out = self.Conv(d2)
# out = self.active(out)
return e1, e2, d2, out
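# Minimal smoke test (illustrative, CPU-only):
# model = AttS_Net(img_ch=1, num_classes=1)
# e1, e2, d2, logits = model(torch.randn(1, 1, 256, 256)) # logits: [1, 1, 256, 256]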
|
the-stack_106_21402
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
import torch.nn.functional as F
from ml.rl import types as rlt
from ml.rl.models.base import ModelBase
from ml.rl.models.fully_connected_network import FullyConnectedNetwork
class CategoricalDQN(ModelBase):
def __init__(
self,
state_dim,
action_dim,
num_atoms,
qmin,
qmax,
sizes,
activations,
use_batch_norm=False,
dropout_ratio=0.0,
use_gpu=False,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
self.fc = FullyConnectedNetwork(
[state_dim] + sizes + [action_dim * num_atoms],
activations + ["linear"],
use_batch_norm=use_batch_norm,
dropout_ratio=dropout_ratio,
)
self.num_atoms = num_atoms
self.action_dim = action_dim
self.support = torch.linspace(qmin, qmax, num_atoms)
if use_gpu and torch.cuda.is_available():
self.support = self.support.cuda()
def input_prototype(self):
return rlt.PreprocessedState.from_tensor(torch.randn(1, self.state_dim))
def forward(self, input: rlt.PreprocessedState):
dist = self.log_dist(input).exp()
q_values = (dist * self.support).sum(2)
return rlt.AllActionQValues(q_values=q_values)
def log_dist(self, input: rlt.PreprocessedState):
log_dist = self.fc(input.state.float_features).reshape(
-1, self.action_dim, self.num_atoms
)
return F.log_softmax(log_dist, -1)
def serving_model(self):
return self.cpu_model()
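# Illustrative: forward() returns the expected Q-values q[b, a] = sum_k p_k(s_b, a) * z_k,
# where z_k are the num_atoms support points in [qmin, qmax]; log_dist() exposes the
# per-atom log-probabilities used to compute them.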
|
the-stack_106_21403
|
import sys
import os
sys.path.append(os.path.abspath("../"))
from unittest import TestCase
from komand_sentinelone.connection.connection import Connection
from komand_sentinelone.actions.apps_by_agent_ids import AppsByAgentIds
import json
import logging
class TestAppsByAgentIds(TestCase):
def test_integration_apps_by_agent_ids(self):
"""
This is an integration test that will connect to the services your plugin uses. It should be used
as the basis for tests below that can run independently of a "live" connection.
This test assumes a normal plugin structure with a /tests directory. In that /tests directory should
be json samples that contain all the data needed to run this test. To generate samples run:
icon-plugin generate samples
"""
log = logging.getLogger("Test")
test_conn = Connection()
test_action = AppsByAgentIds()
test_conn.logger = log
test_action.logger = log
try:
with open("../tests/apps_by_agent_ids.json") as file:
test_json = json.loads(file.read()).get("body")
connection_params = test_json.get("connection")
action_params = test_json.get("input")
except Exception as e:
message = """
Could not find or read sample tests from /tests directory
An exception here likely means you didn't fill out your samples correctly in the /tests directory
Please use 'icon-plugin generate samples', and fill out the resulting test files in the /tests directory
"""
self.fail(message)
test_conn.connect(connection_params)
test_action.connection = test_conn
results = test_action.run(action_params)
self.assertTrue(len(results.get("data")) >= 0)
|
the-stack_106_21404
|
import socket
class TcpServer():
def __init__(self):
pass
def start(self, port):
# initialize the main listening socket
main_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
main_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
main_addr = ("", port)
main_socket.bind(main_addr)
main_socket.listen(5)
self.listen_loop(main_socket)
def listen_loop(self, main_socket):
try:
while True:
print("---main process: wait for the next client...---")
new_socket, client_addr = main_socket.accept()
print("---main process: client connected: {0}---".format(client_addr))
self.tcp_worker(new_socket, client_addr)
finally:
print("---main process: server shutdown---")
main_socket.close()
def tcp_worker(self, new_socket, client_addr):
try:
while True:
recv_data = new_socket.recv(1024)
if len(recv_data) > 0:
print("{0}: {1}".format(client_addr, recv_data))
else:
print("---main process: client disconnected: {}---".format(client_addr))
break
finally:
new_socket.close()
def main():
new_server = TcpServer()
new_server.start(7788)
if __name__ == "__main__":
main()
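# Illustrative manual test (assumes port 7788 is free): run this script, then connect from
# another terminal with `nc 127.0.0.1 7788` and send a line; the server prints it to its own
# stdout together with the client address.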
|
the-stack_106_21407
|
from typing import TYPE_CHECKING, Hashable, List, Tuple, Union
import numpy as np
from pandas._config.config import option_context
from pandas._libs.indexing import _NDFrameIndexerBase
from pandas._libs.lib import item_from_zerodim
from pandas.errors import AbstractMethodError, InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_array_like,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sequence,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import _infer_fill_value, isna
import pandas.core.common as com
from pandas.core.construction import array as pd_array
from pandas.core.indexers import (
check_array_indexer,
is_list_like_indexer,
length_of_indexer,
)
from pandas.core.indexes.api import Index
if TYPE_CHECKING:
from pandas import DataFrame # noqa:F401
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice:
"""
Create an object to more easily perform multi-index slicing.
See Also
--------
MultiIndex.remove_unused_levels : New MultiIndex with no unused levels.
Notes
-----
See :ref:`Defined Levels <advanced.shown_levels>`
for further info on slicing a MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class IndexingMixin:
"""
Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series.
"""
@property
def iloc(self) -> "_iLocIndexer":
"""
Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`.
See Also
--------
DataFrame.iat : Fast integer location scalar accessor.
DataFrame.loc : Purely label-location based indexer for selection by label.
Series.iloc : Purely integer-location based indexing for
selection by position.
Examples
--------
>>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
... {'a': 100, 'b': 200, 'c': 300, 'd': 400},
... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
>>> df = pd.DataFrame(mydict)
>>> df
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing just the rows**
With a scalar integer.
>>> type(df.iloc[0])
<class 'pandas.core.series.Series'>
>>> df.iloc[0]
a 1
b 2
c 3
d 4
Name: 0, dtype: int64
With a list of integers.
>>> df.iloc[[0]]
a b c d
0 1 2 3 4
>>> type(df.iloc[[0]])
<class 'pandas.core.frame.DataFrame'>
>>> df.iloc[[0, 1]]
a b c d
0 1 2 3 4
1 100 200 300 400
With a `slice` object.
>>> df.iloc[:3]
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
With a boolean mask the same length as the index.
>>> df.iloc[[True, False, True]]
a b c d
0 1 2 3 4
2 1000 2000 3000 4000
With a callable, useful in method chains. The `x` passed
to the ``lambda`` is the DataFrame being sliced. This selects
the rows whose index label is even.
>>> df.iloc[lambda x: x.index % 2 == 0]
a b c d
0 1 2 3 4
2 1000 2000 3000 4000
**Indexing both axes**
You can mix the indexer types for the index and columns. Use ``:`` to
select the entire axis.
With scalar integers.
>>> df.iloc[0, 1]
2
With lists of integers.
>>> df.iloc[[0, 2], [1, 3]]
b d
0 2 4
2 2000 4000
With `slice` objects.
>>> df.iloc[1:3, 0:3]
a b c
1 100 200 300
2 1000 2000 3000
With a boolean array whose length matches the columns.
>>> df.iloc[:, [True, False, True, False]]
a c
0 1 3
1 100 300
2 1000 3000
With a callable function that expects the Series or DataFrame.
>>> df.iloc[:, lambda df: [0, 2]]
a c
0 1 3
1 100 300
2 1000 3000
"""
return _iLocIndexer("iloc", self)
@property
def loc(self) -> "_LocIndexer":
"""
Access a group of rows and columns by label(s) or a boolean array.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'``.
.. warning:: Note that contrary to usual python slices, **both** the
start and the stop are included
- A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
See more at :ref:`Selection by Label <indexing.label>`
Raises
------
KeyError
If any items are not found.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.iloc : Access group of rows and columns by integer position(s).
DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame.
Series.loc : Access group of values using labels.
Examples
--------
**Getting values**
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
Single label. Note this returns the row as a Series.
>>> df.loc['viper']
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[['viper', 'sidewinder']]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for row and column
>>> df.loc['cobra', 'shield']
2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
>>> df.loc['cobra':'viper', 'max_speed']
cobra 1
viper 4
Name: max_speed, dtype: int64
Boolean list with the same length as the row axis
>>> df.loc[[False, False, True]]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
>>> df.loc[df['shield'] > 6, ['max_speed']]
max_speed
sidewinder 7
Callable that returns a boolean Series
>>> df.loc[lambda df: df['shield'] == 8]
max_speed shield
sidewinder 7 8
**Setting values**
Set value for all items matching the list of labels
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
>>> df
max_speed shield
cobra 1 2
viper 4 50
sidewinder 7 50
Set value for an entire row
>>> df.loc['cobra'] = 10
>>> df
max_speed shield
cobra 10 10
viper 4 50
sidewinder 7 50
Set value for an entire column
>>> df.loc[:, 'max_speed'] = 30
>>> df
max_speed shield
cobra 30 10
viper 30 50
sidewinder 30 50
Set value for rows matching callable condition
>>> df.loc[df['shield'] > 35] = 0
>>> df
max_speed shield
cobra 30 10
viper 0 0
sidewinder 0 0
**Getting values on a DataFrame with an index that has integer labels**
Another example using integers for the index
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=[7, 8, 9], columns=['max_speed', 'shield'])
>>> df
max_speed shield
7 1 2
8 4 5
9 7 8
Slice with integer labels for rows. As mentioned above, note that both
the start and stop of the slice are included.
>>> df.loc[7:9]
max_speed shield
7 1 2
8 4 5
9 7 8
**Getting values with a MultiIndex**
A number of examples using a DataFrame with a MultiIndex
>>> tuples = [
... ('cobra', 'mark i'), ('cobra', 'mark ii'),
... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
... ('viper', 'mark ii'), ('viper', 'mark iii')
... ]
>>> index = pd.MultiIndex.from_tuples(tuples)
>>> values = [[12, 2], [0, 4], [10, 20],
... [1, 4], [7, 1], [16, 36]]
>>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)
>>> df
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Single label. Note this returns a DataFrame with a single index.
>>> df.loc['cobra']
max_speed shield
mark i 12 2
mark ii 0 4
Single index tuple. Note this returns a Series.
>>> df.loc[('cobra', 'mark ii')]
max_speed 0
shield 4
Name: (cobra, mark ii), dtype: int64
Single label for row and column. Similar to passing in a tuple, this
returns a Series.
>>> df.loc['cobra', 'mark i']
max_speed 12
shield 2
Name: (cobra, mark i), dtype: int64
Single tuple. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[[('cobra', 'mark ii')]]
max_speed shield
cobra mark ii 0 4
Single tuple for the index with a single label for the column
>>> df.loc[('cobra', 'mark i'), 'shield']
2
Slice from index tuple to single label
>>> df.loc[('cobra', 'mark i'):'viper']
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Slice from index tuple to index tuple
>>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')]
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
"""
return _LocIndexer("loc", self)
@property
def at(self) -> "_AtIndexer":
"""
Access a single value for a row/column label pair.
Similar to ``loc``, in that both provide label-based lookups. Use
``at`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
KeyError
If 'label' does not exist in DataFrame.
See Also
--------
DataFrame.iat : Access a single value for a row/column pair by integer
position.
DataFrame.loc : Access a group of rows and columns by label(s).
Series.at : Access a single value using a label.
Examples
--------
>>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... index=[4, 5, 6], columns=['A', 'B', 'C'])
>>> df
A B C
4 0 2 3
5 0 4 1
6 10 20 30
Get value at specified row/column pair
>>> df.at[4, 'B']
2
Set value at specified row/column pair
>>> df.at[4, 'B'] = 10
>>> df.at[4, 'B']
10
Get value within a Series
>>> df.loc[5].at['B']
4
"""
return _AtIndexer("at", self)
@property
def iat(self) -> "_iAtIndexer":
"""
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
IndexError
When integer position is out of bounds.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.loc : Access a group of rows and columns by label(s).
DataFrame.iloc : Access a group of rows and columns by integer position(s).
Examples
--------
>>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
1
Set value at specified row/column pair
>>> df.iat[1, 2] = 10
>>> df.iat[1, 2]
10
Get value within a series
>>> df.loc[0].iat[1]
2
"""
return _iAtIndexer("iat", self)
class _LocationIndexer(_NDFrameIndexerBase):
_valid_types: str
axis = None
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = type(self)(self.name, self.obj)
if axis is not None:
axis = self.obj._get_axis_number(axis)
new_self.axis = axis
return new_self
def _get_setitem_indexer(self, key):
"""
Convert a potentially-label-based key into a positional indexer.
"""
if self.name == "loc":
self._ensure_listlike_indexer(key)
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
ax = self.obj._get_axis(0)
if isinstance(ax, ABCMultiIndex) and self.name != "iloc":
try:
return ax.get_loc(key)
except (TypeError, KeyError, InvalidIndexError):
# TypeError e.g. passed a bool
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return list(key)
try:
return self._convert_to_indexer(key, axis=0, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if "cannot do" in str(e):
raise
elif "unhashable type" in str(e):
raise
raise IndexingError(key) from e
def _ensure_listlike_indexer(self, key, axis=None):
"""
Ensure that all labels in a list-like of column labels are present, adding
any that do not already exist.
Parameters
----------
key : list-like of column labels
Target labels.
axis : key axis if known
"""
column_axis = 1
# column only exists in 2-dimensional DataFrame
if self.ndim != 2:
return
if isinstance(key, tuple):
# key may be a tuple if we are .loc
# in that case, set key to the column part of key
key = key[column_axis]
axis = column_axis
if (
axis == column_axis
and not isinstance(self.obj.columns, ABCMultiIndex)
and is_list_like_indexer(key)
and not com.is_bool_indexer(key)
and all(is_hashable(k) for k in key)
):
for k in key:
if k not in self.obj:
self.obj[k] = np.nan
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
else:
key = com.apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._has_valid_setitem_indexer(key)
iloc = self if self.name == "iloc" else self.obj.iloc
iloc._setitem_with_indexer(indexer, value)
def _validate_key(self, key, axis: int):
"""
Ensure that key is valid for current indexer.
Parameters
----------
key : scalar, slice or list-like
Key requested.
axis : int
Dimension on which the indexing is being made.
Raises
------
TypeError
If the key (or some element of it) has wrong type.
IndexError
If the key (or some element of it) is out of bounds.
KeyError
If the key was not found.
"""
raise AbstractMethodError(self)
def _has_valid_tuple(self, key: Tuple):
"""
Check the key for valid keys across my indexer.
"""
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
try:
self._validate_key(k, i)
except ValueError as err:
raise ValueError(
"Location based indexing can only have "
f"[{self._valid_types}] types"
) from err
def _is_nested_tuple_indexer(self, tup: Tuple) -> bool:
"""
Returns
-------
bool
"""
if any(isinstance(ax, ABCMultiIndex) for ax in self.obj.axes):
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
def _convert_tuple(self, key, is_setter: bool = False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(
self._convert_to_indexer(key, axis=axis, is_setter=is_setter)
)
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _getitem_tuple_same_dim(self, tup: Tuple):
"""
Index with indexers that should return an object of the same dimension
as self.obj.
This is only called after a failed call to _getitem_lowerdim.
"""
retval = self.obj
for i, key in enumerate(tup):
if com.is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
# We should never have retval.ndim < self.ndim, as that should
# be handled by the _getitem_lowerdim call above.
assert retval.ndim == self.ndim
return retval
def _getitem_lowerdim(self, tup: Tuple):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we maybe be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, ABCMultiIndex) and self.name != "iloc":
try:
result = self._handle_lowerdim_multi_index_axis0(tup)
return result
except IndexingError:
pass
if len(tup) > self.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
for i, key in enumerate(tup):
if is_label_like(key):
# We don't need to check for tuples here because those are
# caught by the _is_nested_tuple_indexer check above.
section = self._getitem_axis(key, axis=i)
# We should never have a scalar section here, because
# _getitem_lowerdim is only called after a check for
# is_scalar_access, which that would be.
if section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1 :]
else:
# Note: the section.ndim == self.ndim check above
# rules out having DataFrame here, so we dont need to worry
# about transposing.
new_key = tup[:i] + tup[i + 1 :]
if len(new_key) == 1:
new_key = new_key[0]
# Slices should return views, but calling iloc/loc with a null
# slice returns a new object.
if com.is_null_slice(new_key):
return section
# This is an elided recursive call to iloc/loc
return getattr(section, self.name)[new_key]
raise IndexingError("not applicable")
def _getitem_nested_tuple(self, tup: Tuple):
# we have a nested tuple so have at least 1 multi-index level
# we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
if self.name != "loc":
# This should never be reached, but lets be explicit about it
raise ValueError("Too many indices")
try:
result = self._handle_lowerdim_multi_index_axis0(tup)
return result
except IndexingError:
pass
# this is a series with a multi-index specified a tuple of
# selectors
axis = self.axis or 0
return self._getitem_axis(tup, axis=axis)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for key in tup:
if com.is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, "ndim"):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
axis -= 1
return obj
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
raise AbstractMethodError(self)
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
if self._is_scalar_access(key):
try:
return self.obj._get_value(*key, takeable=self._takeable)
except (KeyError, IndexError, AttributeError):
# AttributeError for IntervalTree get_value
pass
return self._getitem_tuple(key)
else:
# we by definition only have the 0th axis
axis = self.axis or 0
maybe_callable = com.apply_if_callable(key, self.obj)
return self._getitem_axis(maybe_callable, axis=axis)
def _is_scalar_access(self, key: Tuple):
raise NotImplementedError()
def _getitem_tuple(self, tup: Tuple):
raise AbstractMethodError(self)
def _getitem_axis(self, key, axis: int):
raise NotImplementedError()
def _has_valid_setitem_indexer(self, indexer) -> bool:
raise AbstractMethodError(self)
def _getbool_axis(self, key, axis: int):
# caller is responsible for ensuring non-None axis
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds = key.nonzero()[0]
return self.obj._take_with_is_copy(inds, axis=axis)
@doc(IndexingMixin.loc)
class _LocIndexer(_LocationIndexer):
_takeable: bool = False
_valid_types = (
"labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean"
)
# -------------------------------------------------------------------
# Key Checks
@doc(_LocationIndexer._validate_key)
def _validate_key(self, key, axis: int):
# valid for a collection of labels (we check their presence later)
# slice of labels (where start-end in labels)
# slice of integers (only if in the labels)
# boolean
pass
def _has_valid_setitem_indexer(self, indexer) -> bool:
return True
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
-------
bool
"""
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, ABCMultiIndex):
return False
if isinstance(k, str) and ax._supports_partial_string_indexing:
# partial string indexing, df.loc['2000', 'A']
# should not be considered scalar
return False
if not ax.is_unique:
return False
return True
# -------------------------------------------------------------------
# MultiIndex Handling
def _multi_take_opportunity(self, tup: Tuple) -> bool:
"""
Check whether there is the possibility to use ``_multi_take``.
Currently the limit is that all axes being indexed, must be indexed with
list-likes.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis.
Returns
-------
bool
Whether the current indexing can be passed through `_multi_take`.
"""
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
if any(com.is_bool_indexer(x) for x in tup):
return False
return True
def _multi_take(self, tup: Tuple):
"""
Create the indexers for the passed tuple of keys, and
executes the take operation. This allows the take operation to be
executed all at once, rather than once for each dimension.
Improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis.
Returns
-------
values: same type as the object being indexed
"""
# GH 836
d = {
axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
}
return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
# -------------------------------------------------------------------
def _getitem_iterable(self, key, axis: int):
"""
Index the current object with an iterable collection of keys.
Parameters
----------
key : iterable
Targeted labels.
axis: int
Dimension on which the indexing is being made.
Raises
------
KeyError
If no key was found. Will change in the future to raise if not all
keys were found.
Returns
-------
scalar, DataFrame, or Series: indexed value(s).
"""
# we assume that not com.is_bool_indexer(key), as that is
# handled before we get here.
self._validate_key(key, axis)
# A collection of keys
keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
return self.obj._reindex_with_indexers(
{axis: [keyarr, indexer]}, copy=True, allow_dups=True
)
def _getitem_tuple(self, tup: Tuple):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
return self._getitem_tuple_same_dim(tup)
def _get_label(self, label, axis: int):
# GH#5667 this will fail if the label is not present in the axis.
return self.obj.xs(label, axis=axis)
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=axis)
except TypeError:
# slices are unhashable
pass
except KeyError as ek:
# raise KeyError if number of indexers match
# else IndexingError will be raised
if len(tup) <= self.obj.index.nlevels and len(tup) > self.ndim:
raise ek
raise IndexingError("No label returned")
def _getitem_axis(self, key, axis: int):
key = item_from_zerodim(key)
if is_iterator(key):
key = list(key)
labels = self.obj._get_axis(axis)
key = labels._get_partial_string_timestamp_match_key(key)
if isinstance(key, slice):
self._validate_key(key, axis)
return self._get_slice_axis(key, axis=axis)
elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._validate_key(key, axis)
return self._get_label(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
"""
This is pretty simple as we just have to deal with labels.
"""
# caller is responsible for ensuring non-None axis
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(
slice_obj.start, slice_obj.stop, slice_obj.step, kind="loc"
)
if isinstance(indexer, slice):
return self.obj._slice(indexer, axis=axis)
else:
# DatetimeIndex overrides Index.slice_indexer and may
# return a DatetimeIndex instead of a slice object.
return self.obj.take(indexer, axis=axis)
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on a ndarray.
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return labels._convert_slice_indexer(key, kind="loc")
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(key) and not is_int_index
if is_scalar(key) or isinstance(labels, ABCMultiIndex):
# Otherwise get_loc will raise InvalidIndexError
# if we are a label return me
try:
return labels.get_loc(key)
except LookupError:
if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
if len(key) == labels.nlevels:
return {"key": key}
raise
except InvalidIndexError:
# GH35015, using datetime as column indices raises exception
if not isinstance(labels, ABCMultiIndex):
raise
except TypeError:
pass
except ValueError:
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
# always valid
return {"key": key}
if is_nested_tuple(key, labels):
return labels.get_locs(key)
elif is_list_like_indexer(key):
if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
(inds,) = key.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
else:
try:
return labels.get_loc(key)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(key):
return {"key": key}
raise
def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
"""
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Targeted labels.
axis: int
Dimension on which the indexing is being made.
raise_missing: bool, default False
Whether to raise a KeyError if some labels were not found.
Will be removed in the future, and then this method will always behave as
if ``raise_missing=True``.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique).
values : array-like
Indexer for the return object, -1 denotes keys not found.
"""
ax = self.obj._get_axis(axis)
# Have the index compute an indexer or return None
# if it cannot handle:
indexer, keyarr = ax._convert_listlike_indexer(key)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(
keyarr, indexer, axis, raise_missing=raise_missing
)
return ax[indexer], indexer
if ax.is_unique and not getattr(ax, "is_overlapping", False):
indexer = ax.get_indexer_for(keyarr)
keyarr = ax.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
return keyarr, indexer
def _validate_read_indexer(
self, key, indexer, axis: int, raise_missing: bool = False
):
"""
Check that indexer can be used to return a result.
e.g. at least one element was found,
unless the list of keys was actually empty.
Parameters
----------
key : list-like
Targeted labels (only used to show correct error message).
indexer: array-like of booleans
Indices corresponding to the key,
(with -1 indicating not found).
axis: int
Dimension on which the indexing is being made.
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
"""
ax = self.obj._get_axis(axis)
if len(key) == 0:
return
# Count missing values:
missing_mask = indexer < 0
missing = (missing_mask).sum()
if missing:
if missing == len(indexer):
axis_name = self.obj._get_axis_name(axis)
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
# We (temporarily) allow for some missing keys with .loc, except in
# some cases (e.g. setting) in which "raise_missing" will be False
if raise_missing:
not_found = list(set(key) - set(ax))
raise KeyError(f"{not_found} not in index")
# we skip the warning on Categorical
# as this check is actually done (check for
# non-missing values), but a bit later in the
# code, so we want to avoid warning & then
# just raising
if not ax.is_categorical():
not_found = key[missing_mask]
with option_context("display.max_seq_items", 10, "display.width", 80):
raise KeyError(
"Passing list-likes to .loc or [] with any missing labels "
"is no longer supported. "
f"The following labels were missing: {not_found}. "
"See https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
)
@doc(IndexingMixin.iloc)
class _iLocIndexer(_LocationIndexer):
_valid_types = (
"integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array"
)
_takeable = True
# -------------------------------------------------------------------
# Key Checks
def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
if hasattr(key, "index") and isinstance(key.index, Index):
if key.index.inferred_type == "integer":
raise NotImplementedError(
"iLocation based boolean "
"indexing on an integer type "
"is not available"
)
raise ValueError(
"iLocation based boolean indexing cannot use "
"an indexable as a mask"
)
return
if isinstance(key, slice):
return
elif is_integer(key):
self._validate_integer(key, axis)
elif isinstance(key, tuple):
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
raise IndexingError("Too many indexers")
elif is_list_like_indexer(key):
arr = np.array(key)
len_axis = len(self.obj._get_axis(axis))
# check that the key has a numeric dtype
if not is_numeric_dtype(arr.dtype):
raise IndexError(f".iloc requires numeric indexers, got {arr}")
# check that the key does not exceed the maximum size of the index
if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
raise IndexError("positional indexers are out-of-bounds")
else:
raise ValueError(f"Can only index by location with a [{self._valid_types}]")
def _has_valid_setitem_indexer(self, indexer) -> bool:
"""
Validate that a positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally.
Returns
-------
bool
"""
if isinstance(indexer, dict):
raise IndexError("iloc cannot enlarge its target object")
else:
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("iloc cannot enlarge its target object")
elif isinstance(i, dict):
raise IndexError("iloc cannot enlarge its target object")
return True
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
-------
bool
"""
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if len(key) != self.ndim:
return False
for k in key:
if not is_integer(k):
return False
return True
def _validate_integer(self, key: int, axis: int) -> None:
"""
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position.
axis : int
Desired axis.
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'.
"""
len_axis = len(self.obj._get_axis(axis))
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
# -------------------------------------------------------------------
def _getitem_tuple(self, tup: Tuple):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
return self._getitem_tuple_same_dim(tup)
def _get_list_axis(self, key, axis: int):
"""
Return Series values by list or array of integers.
Parameters
----------
key : list-like positional indexer
axis : int
Returns
-------
Series object
Notes
-----
`axis` can only be zero.
"""
try:
return self.obj._take_with_is_copy(key, axis=axis)
except IndexError as err:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds") from err
def _getitem_axis(self, key, axis: int):
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
key = np.asarray(key)
if com.is_bool_indexer(key):
self._validate_key(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = item_from_zerodim(key)
if not is_integer(key):
raise TypeError("Cannot index by location index with a non-integer key")
# validate the location
self._validate_integer(key, axis)
return self.obj._ixs(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
# caller is responsible for ensuring non-None axis
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
labels._validate_positional_slice(slice_obj)
return self.obj._slice(slice_obj, axis=axis)
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Much simpler as we only have to deal with our valid types.
"""
return key
def _get_setitem_indexer(self, key):
# GH#32257 Fall through to let numpy do validation
return key
# -------------------------------------------------------------------
def _setitem_with_indexer(self, indexer, value):
"""
_setitem_with_indexer is for setting values on a Series/DataFrame
using positional indexers.
If the relevant keys are not present, the Series/DataFrame may be
expanded.
This method is currently broken when dealing with non-unique Indexes,
since it goes from positional indexers back to labels when calling
BlockManager methods, see GH#12991, GH#22046, GH#15686.
"""
# also has the side effect of consolidating in-place
from pandas import Series
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._mgr.blocks:
(blk,) = self.obj._mgr.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value, dict) else value
take_split_path = not blk._can_hold_element(val)
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360, GH 27841
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
if isinstance(ax, ABCMultiIndex) and not (
is_integer(i) or com.is_null_slice(i)
):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == info_axis:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = (
len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i
)
if any(not l for l in len_non_info_axes):
if not is_list_like_indexer(value):
raise ValueError(
"cannot set a frame with no "
"defined index and a scalar"
)
self.obj[key] = value
return
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes
)
self._setitem_with_indexer(new_indexer, value)
return
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._mgr = self.obj.reindex(labels, axis=i)._mgr
self.obj._maybe_update_cacher(clear=True)
self.obj._is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
self._setitem_with_indexer_missing(indexer, value)
return
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
# Above we only set take_split_path to True for 2D cases
assert self.ndim == 2
assert info_axis == 1
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# Ensure we have something we can iterate over
ilocs = info_idx
if isinstance(info_idx, slice):
ri = Index(range(len(self.obj.columns)))
ilocs = ri[info_idx]
plane_indexer = indexer[:1]
lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
# lplane_indexer gives the expected length of obj[indexer[0]]
if len(labels) == 1:
# We can operate on a single column
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and 0 != lplane_indexer != len(value):
# Exclude zero-len for e.g. boolean masking that is all-false
raise ValueError(
"cannot set using a multi-index "
"selection indexer with a different "
"length than the value"
)
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
def isetter(loc, v):
# positional setting on column loc
ser = self.obj._ixs(loc, axis=1)
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if isinstance(pi, tuple) and all(
com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj))
for idx in pi
):
ser = v
else:
# set the item, possibly having a dtype change
ser = ser.copy()
ser._mgr = ser._mgr.setitem(indexer=pi, value=v)
ser._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj._iset_item(loc, ser)
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame):
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, ABCMultiIndex)
# TODO: we are implicitly assuming value.columns is unique
for loc in ilocs:
item = item_labels[loc]
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item], multiindex_indexer
)
else:
v = np.nan
isetter(loc, v)
# we have an equal len ndarray/convertible to our labels
# hasattr first, to avoid coercing to ndarray without reason.
# But we may be relying on the ndarray coercion to check ndim.
# Why not just convert to an ndarray earlier on if needed?
elif np.ndim(value) == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(ilocs) != value.shape[1]:
raise ValueError(
"Must have equal len keys and value "
"when setting with an ndarray"
)
for i, loc in enumerate(ilocs):
# setting with a list, re-coerces
isetter(loc, value[:, i].tolist())
elif (
len(labels) == 1
and lplane_indexer == len(value)
and not is_scalar(plane_indexer[0])
):
# we have an equal len list/ndarray
# We only get here with len(labels) == len(ilocs) == 1
isetter(ilocs[0], value)
elif lplane_indexer == 0 and len(value) == len(self.obj.index):
# We get here in one case via .loc with a all-False mask
pass
else:
# per-label values
if len(ilocs) != len(value):
raise ValueError(
"Must have equal len keys and value "
"when setting with an iterable"
)
for loc, v in zip(ilocs, value):
isetter(loc, v)
else:
# scalar value
for loc in ilocs:
isetter(loc, value)
else:
if isinstance(indexer, tuple):
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
len(indexer) > info_axis
and is_integer(indexer[info_axis])
and all(
com.is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis
)
and item_labels.is_unique
):
self.obj[item_labels[indexer[info_axis]]] = value
return
indexer = maybe_convert_ix(*indexer)
if isinstance(value, (ABCSeries, dict)):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
# if it's worth supporting that.
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
def _setitem_with_indexer_missing(self, indexer, value):
"""
Insert new row(s) or column(s) into the Series or DataFrame.
"""
from pandas import Series
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH#12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer, value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
# GH#22717 handle casting compatibility that np.concatenate
# does incorrectly
new_values = concat_compat([self.obj._values, new_values])
self.obj._mgr = self.obj._constructor(
new_values, index=new_index, name=self.obj.name
)._mgr
self.obj._maybe_update_cacher(clear=True)
elif self.ndim == 2:
if not len(self.obj.columns):
# no columns and scalar
raise ValueError("cannot set a frame with no defined columns")
if isinstance(value, ABCSeries):
# append a Series
value = value.reindex(index=self.obj.columns, copy=True)
value.name = indexer
elif isinstance(value, dict):
value = Series(
value, index=self.obj.columns, name=indexer, dtype=object
)
else:
# a list-list
if is_list_like_indexer(value):
# must have conforming columns
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with mismatched columns")
value = Series(value, index=self.obj.columns, name=indexer)
self.obj._mgr = self.obj.append(value)._mgr
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False):
"""
Parameters
----------
indexer : tuple, slice, scalar
Indexer used to get the locations that will be set to `ser`.
ser : pd.Series
Values to assign to the locations specified by `indexer`.
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
def ravel(i):
return i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.ndim == 2
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
            # rather than find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
len_indexer = len(indexer[1])
ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and com.is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError("Incompatible indexer with Series")
def _align_frame(self, indexer, df: ABCDataFrame):
is_frame = self.ndim == 2
if isinstance(indexer, tuple):
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if isinstance(ix, np.ndarray):
ix = ix.ravel()
if idx is None:
idx = ax[ix]
elif cols is None:
cols = ax[ix]
else:
break
else:
sindexers.append(i)
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (
isinstance(ax, ABCMultiIndex)
and isinstance(df.index, ABCMultiIndex)
and ax.nlevels != df.index.nlevels
):
                    raise TypeError(
                        "cannot align on a multi-index without "
                        "specifying the join levels"
                    )
val = df.reindex(index=ax)._values
return val
raise ValueError("Incompatible indexer with DataFrame")
class _ScalarAccessIndexer(_NDFrameIndexerBase):
"""
Access scalars quickly.
"""
def _convert_key(self, key, is_setter: bool = False):
raise AbstractMethodError(self)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError("Invalid call for scalar access (getting)!")
key = self._convert_key(key)
return self.obj._get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
else:
# scalar callable may return tuple
key = com.apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = _tuplify(self.ndim, key)
key = list(self._convert_key(key, is_setter=True))
if len(key) != self.ndim:
raise ValueError("Not enough indexers for scalar access (setting)!")
self.obj._set_value(*key, value=value, takeable=self._takeable)
@doc(IndexingMixin.at)
class _AtIndexer(_ScalarAccessIndexer):
_takeable = False
def _convert_key(self, key, is_setter: bool = False):
"""
        Require the keys to be the same type as the index (so we don't
        fall back).
"""
# GH 26989
# For series, unpacking key needs to result in the label.
# This is already the case for len(key) == 1; e.g. (1,)
if self.ndim == 1 and len(key) > 1:
key = (key,)
# allow arbitrary setting
if is_setter:
return list(key)
return key
@property
def _axes_are_unique(self) -> bool:
# Only relevant for self.ndim == 2
assert self.ndim == 2
return self.obj.index.is_unique and self.obj.columns.is_unique
def __getitem__(self, key):
if self.ndim == 2 and not self._axes_are_unique:
# GH#33041 fall back to .loc
if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
raise ValueError("Invalid call for scalar access (getting)!")
return self.obj.loc[key]
return super().__getitem__(key)
def __setitem__(self, key, value):
if self.ndim == 2 and not self._axes_are_unique:
# GH#33041 fall back to .loc
if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
raise ValueError("Invalid call for scalar access (setting)!")
self.obj.loc[key] = value
return
return super().__setitem__(key, value)
@doc(IndexingMixin.iat)
class _iAtIndexer(_ScalarAccessIndexer):
_takeable = True
def _convert_key(self, key, is_setter: bool = False):
"""
Require integer args. (and convert to label arguments)
"""
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer indexers")
return key
def _tuplify(ndim: int, loc: Hashable) -> Tuple[Union[Hashable, slice], ...]:
"""
Given an indexer for the first dimension, create an equivalent tuple
for indexing over all dimensions.
Parameters
----------
ndim : int
loc : object
Returns
-------
tuple
"""
_tup: List[Union[Hashable, slice]]
_tup = [slice(None, None) for _ in range(ndim)]
_tup[0] = loc
return tuple(_tup)
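# Illustrative behaviour of _tuplify (sketch, not part of the original module):
#   _tuplify(2, "a")  -> ("a", slice(None, None, None))
#   _tuplify(3, 0)    -> (0, slice(None, None, None), slice(None, None, None))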
def convert_to_index_sliceable(obj: "DataFrame", key):
"""
If we are index sliceable, then return my slicer, otherwise return None.
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind="getitem")
elif isinstance(key, str):
# we are an actual column
if key in obj.columns:
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx._supports_partial_string_indexing:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def check_bool_indexer(index: Index, key) -> np.ndarray:
"""
Check if key is a valid boolean indexer for an object with such index and
perform reindexing or conversion if needed.
This function assumes that is_bool_indexer(key) == True.
Parameters
----------
index : Index
Index of the object on which the indexing is done.
key : list-like
Boolean indexer to check.
Returns
-------
np.array
Resulting key.
Raises
------
IndexError
If the key does not have the same length as index.
IndexingError
If the index of the key is unalignable to index.
"""
result = key
if isinstance(key, ABCSeries) and not key.index.equals(index):
result = result.reindex(index)
mask = isna(result._values)
if mask.any():
raise IndexingError(
"Unalignable boolean Series provided as "
"indexer (index of the boolean Series and of "
"the indexed object do not match)."
)
return result.astype(bool)._values
if is_object_dtype(key):
# key might be object-dtype bool, check_array_indexer needs bool array
result = np.asarray(result, dtype=bool)
elif not is_array_like(result):
# GH 33924
# key may contain nan elements, check_array_indexer needs bool array
result = pd_array(result, dtype=bool)
return check_array_indexer(index, result)
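# Illustrative behaviour of check_bool_indexer (sketch, not part of the original
# module): a boolean Series key is first aligned to the target index, e.g.
#   check_bool_indexer(Index(["a", "b", "c"]),
#                      Series([True, False, True], index=["c", "b", "a"]))
# evaluates to array([ True, False,  True]); a key whose index cannot be fully
# aligned raises IndexingError instead.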
def convert_missing_indexer(indexer):
"""
    Reverse convert a missing indexer, which is a dict.
    Return the scalar indexer and a boolean indicating if we converted.
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer["key"]
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
"""
Create a filtered indexer that doesn't have any missing indexers.
"""
def get_indexer(_i, _idx):
return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx
return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer))
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product.
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
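# Illustrative behaviour of maybe_convert_ix (sketch, not part of the original
# module): list-like arguments are combined as a cross product via np.ix_, e.g.
#   maybe_convert_ix([0, 1], [2, 3])  # equivalent to np.ix_([0, 1], [2, 3])
# while a scalar or slice among the arguments leaves the inputs unchanged.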
def is_nested_tuple(tup, labels) -> bool:
"""
Returns
-------
bool
"""
# check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
for k in tup:
if is_list_like(k) or isinstance(k, slice):
return isinstance(labels, ABCMultiIndex)
return False
def is_label_like(key) -> bool:
"""
Returns
-------
bool
"""
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj) -> bool:
"""
Returns
-------
bool
"""
return (
obj.start is not None
or obj.stop is not None
or (obj.step is not None and obj.step != 1)
)
def _non_reducing_slice(slice_):
"""
Ensure that a slice doesn't reduce to a Series or Scalar.
Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlice[:, ['A', 'B']]
kinds = (ABCSeries, np.ndarray, Index, list, str)
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part) -> bool:
"""
Returns
-------
bool
True if slice does *not* reduce,
False if `part` is a tuple.
"""
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
return (isinstance(part, slice) or is_list_like(part)) and not isinstance(
part, tuple
)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
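# Illustrative behaviour of _non_reducing_slice (sketch, not part of the original
# module): a bare column label is expanded into a 2-D indexer so .loc keeps
# returning a DataFrame, e.g.
#   _non_reducing_slice("A")  -> (slice(None, None, None), ["A"])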
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
Want nice defaults for background_gradient that don't break
    with non-numeric data. But if slice_ is passed, go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
|
the-stack_106_21409
|
"""
Custom TFX component for Firebase upload.
Author: Chansung Park
"""
from tfx import types
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx import v1 as tfx
from absl import logging
import firebase_admin
from firebase_admin import ml
from firebase_admin import storage
from firebase_admin import credentials
from google.cloud import storage as gcs_storage
@component
def FirebasePublisher(
pushed_model: tfx.dsl.components.InputArtifact[
tfx.types.standard_artifacts.PushedModel
],
credential_uri: Parameter[str],
firebase_dest_gcs_bucket: Parameter[str],
model_display_name: Parameter[str],
model_tag: Parameter[str],
) -> tfx.dsl.components.OutputDict(result=str):
"""
    Publish a trained TFLite model to Firebase ML. This component assumes that
    the trained model and the Firebase credential file are stored in GCS locations.
Args:
        pushed_model: The URI of the pushed model obtained from the previous component (e.g. Pusher)
credential_uri: The URI of Firebase credential. In order to get one, go to Firebase dashboard
and on the Settings page, create a service account and download the service account key file.
Keep this file safe, since it grants administrator access to your project.
firebase_dest_gcs_bucket: GCS bucket where the model is going to be temporarily stored.
In order to create one, go to Firebase dashboard and on the Storage page, enable Cloud Storage.
Take note of your bucket name.
        model_display_name: The name to appear on the Firebase ML dashboard
        model_tag: The tag name to appear on the Firebase ML dashboard
"""
model_uri = f"{pushed_model.uri}/model.tflite"
assert model_uri.split("://")[0] == "gs"
assert credential_uri.split("://")[0] == "gs"
# create gcs client instance
gcs_client = gcs_storage.Client()
# get credential for firebase
credential_gcs_bucket = credential_uri.split("//")[1].split("/")[0]
credential_blob_path = "/".join(credential_uri.split("//")[1].split("/")[1:])
bucket = gcs_client.bucket(credential_gcs_bucket)
blob = bucket.blob(credential_blob_path)
blob.download_to_filename("credential.json")
logging.info(f"download credential.json from {credential_uri} is completed")
# get tflite model file
tflite_gcs_bucket = model_uri.split("//")[1].split("/")[0]
tflite_blob_path = "/".join(model_uri.split("//")[1].split("/")[1:])
bucket = gcs_client.bucket(tflite_gcs_bucket)
blob = bucket.blob(tflite_blob_path)
blob.download_to_filename("model.tflite")
logging.info(f"download model.tflite from {model_uri} is completed")
firebase_admin.initialize_app(
credentials.Certificate("credential.json"),
options={"storageBucket": firebase_dest_gcs_bucket},
)
logging.info("firebase_admin initialize app is completed")
model_list = ml.list_models(list_filter=f"display_name={model_display_name}")
# update
if len(model_list.models) > 0:
# get the first match model
model = model_list.models[0]
source = ml.TFLiteGCSModelSource.from_tflite_model_file("model.tflite")
model.model_format = ml.TFLiteFormat(model_source=source)
updated_model = ml.update_model(model)
ml.publish_model(updated_model.model_id)
        logging.info("model exists, so update it in Firebase ML")
return {"result": "model updated"}
# create
else:
# load a tflite file and upload it to Cloud Storage
source = ml.TFLiteGCSModelSource.from_tflite_model_file("model.tflite")
# create the model object
tflite_format = ml.TFLiteFormat(model_source=source)
model = ml.Model(
display_name=model_display_name,
tags=[model_tag],
model_format=tflite_format,
)
# Add the model to your Firebase project and publish it
new_model = ml.create_model(model)
ml.publish_model(new_model.model_id)
        logging.info("model does not exist, so create one in Firebase ML")
return {"result": "model created"}
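# Illustrative wiring inside a TFX pipeline (sketch, not part of the original file;
# the `pusher` component, bucket names and URIs below are placeholders):
#
#   firebase_publisher = FirebasePublisher(
#       pushed_model=pusher.outputs["pushed_model"],
#       credential_uri="gs://<bucket>/credentials/firebase-sa.json",
#       firebase_dest_gcs_bucket="<firebase-storage-bucket>",
#       model_display_name="my_tflite_model",
#       model_tag="latest",
#   )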
|
the-stack_106_21410
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
from .authproxy import JSONRPCException
from .util import (
append_config,
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
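    # Illustrative lifecycle (sketch only, not part of the original file; the test
    # framework normally constructs and manages nodes, and the arguments below are
    # placeholders):
    #   node = TestNode(0, "/tmp/node0", rpchost=None, timewait=None, binary=None,
    #                   stderr=None, mocktime=0, coverage_dir=None)
    #   node.start()
    #   node.wait_for_rpc_connection()
    #   node.getblockcount()   # unknown attributes dispatch to the RPC proxy
    #   node.stop_node()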
def __init__(self, i, datadir, rpchost, timewait, binary, stderr, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("BITCOIND", "bitcoind")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
        if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i,
"-noprinttoconsole"
]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "bitcoin-cli"), self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print("Cleaning up leftover process")
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process. Raises an AssertionError if unable to connect within the timeout."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError('bitcoind exited with status {} during initialization'.format(self.process.returncode))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to bitcoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start(extra_args, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('bitcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
raise AssertionError('Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
raise AssertionError('Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
raise AssertionError('Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
    def node_encrypt_wallet(self, passphrase):
        """Encrypts the wallet.
        This causes bitcoind to shut down, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
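    # Illustrative use (sketch, not part of the original file): options passed to
    # the call are forwarded to bitcoin-cli, and unknown attributes become RPC
    # commands via __getattr__ below, e.g.
    #   node.cli("-rpcwallet=w1").getbalance()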
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
|
the-stack_106_21412
|
import os
import logging
import unittest
from unittest import mock
from unittest.mock import patch
from unittest.mock import mock_open
from myredditdl.config_handler import ConfigHandler
from tests.mock_utils import get_new_client
from tests.mock_utils import get_dir_path
from tests.mock_utils import get_valid_options
class TestConfigHandler(unittest.TestCase):
MOCK_FILE = get_dir_path()
VALID_OPTIONS = get_valid_options()
def setUp(self):
self.cfg = ConfigHandler()
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_available_reddit_clients(self):
sections = ['DEFAULTS', 'USERS', 'EMPTY_CLIENT', 'FIRST', 'SECOND']
with patch.object(ConfigHandler, 'get_config_sections', return_value=sections):
self.assertEqual(
self.cfg.get_available_reddit_clients(), [
'FIRST', 'SECOND'])
@mock.patch('builtins.open', new_callable=mock_open())
@mock.patch('myredditdl.config_handler.ConfigHandler.get_config_path')
@mock.patch('myredditdl.config_handler.ConfigHandler.get_client_active_section')
def test_add_client(self, client, cfg_path, mock_open):
client.return_value = 'EMPTY_CLIENT'
cfg_path.return_value = self.MOCK_FILE
self.assertFalse(self.cfg.add_client(get_new_client('EMPTY_CLIENT')))
self.assertTrue(self.cfg.add_client(get_new_client('NEW_USER')))
@mock.patch('builtins.open', new_callable=mock_open())
@mock.patch('myredditdl.config_handler.ConfigHandler.get_config_path')
@mock.patch('myredditdl.config_handler.ConfigHandler.get_client_active_section')
def test_set_new_current_user(self, client, cfg_path, mock_open):
client.return_value = 'EMPTY_CLIENT'
cfg_path.return_value = self.MOCK_FILE
self.assertFalse(self.cfg.set_new_current_user('EMPTY_CLIENT'))
self.assertFalse(self.cfg.set_new_current_user('EMPTY_CLIENT'))
self.assertTrue(self.cfg.set_new_current_user('new_user'))
@mock.patch('myredditdl.config_handler.ConfigHandler.get_valid_prefix_options')
@mock.patch('myredditdl.config_handler.ConfigHandler.get_prefix')
def test_set_prefix_option(self, mock_prefix, mock_options):
mock_prefix.return_value = 'subreddit_name'
mock_options.return_value = self.VALID_OPTIONS
self.assertFalse(self.cfg.set_prefix_option('subreddit_name'))
self.assertTrue(self.cfg.set_prefix_option('username'))
self.assertTrue(self.cfg.set_prefix_option('username_subreddit'))
self.assertFalse(self.cfg.set_prefix_option('sub_name'))
def test_default_media_path(self):
home = os.path.expanduser('~/Pictures/')
with patch.object(ConfigHandler, 'get_client_active_section', return_value='SATORU'):
self.assertEqual(
self.cfg.get_default_media_path(),
home + 'SATORU_reddit' + os.sep)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_21413
|
import torch as t
import torch.nn as nn
import torch.nn.functional as F
#################################
# ## TOY NETWORK FOR 2D DATA ## #
#################################
class ToyNet(nn.Module):
def __init__(self, dim=2, n_f=32, leak=0.05):
super(ToyNet, self).__init__()
self.f = nn.Sequential(
nn.Conv2d(dim, n_f, 1, 1, 0),
nn.LeakyReLU(leak),
nn.Conv2d(n_f, n_f * 2, 1, 1, 0),
nn.LeakyReLU(leak),
nn.Conv2d(n_f * 2, n_f * 2, 1, 1, 0),
nn.LeakyReLU(leak),
nn.Conv2d(n_f * 2, n_f * 2, 1, 1, 0),
nn.LeakyReLU(leak),
nn.Conv2d(n_f * 2, 1, 1, 1, 0))
def forward(self, x):
return self.f(x).squeeze()
#########################
# ## VANILLA CONVNET ## #
#########################
class VanillaNet(nn.Module):
def __init__(self, n_c=3, n_f=32, leak=0.05):
super(VanillaNet, self).__init__()
self.f = nn.Sequential(
nn.Conv2d(n_c, n_f, 3, 1, 1),
nn.LeakyReLU(leak),
nn.Conv2d(n_f, n_f * 2, 4, 2, 1),
nn.LeakyReLU(leak),
nn.Conv2d(n_f*2, n_f*4, 4, 2, 1),
nn.LeakyReLU(leak),
nn.Conv2d(n_f*4, n_f*8, 4, 2, 1),
nn.LeakyReLU(leak),
nn.Conv2d(n_f*8, 1, 4, 1, 0))
def forward(self, x):
return self.f(x).squeeze()
######################
# ## NONLOCAL NET ## #
######################
# implementation with minor changes from https://github.com/AlexHex7/Non-local_pytorch
# Original Version: Copyright (c) 2018 AlexHex7
class NonlocalNet(nn.Module):
def __init__(self, n_c=3, n_f=32, leak=0.05):
super(NonlocalNet, self).__init__()
self.convs = nn.Sequential(
nn.Conv2d(in_channels=n_c, out_channels=n_f, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=leak),
nn.MaxPool2d(2),
NonLocalBlock(in_channels=n_f),
nn.Conv2d(in_channels=n_f, out_channels=n_f * 2, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=leak),
nn.MaxPool2d(2),
NonLocalBlock(in_channels=n_f * 2),
nn.Conv2d(in_channels=n_f * 2, out_channels=n_f * 4, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=leak),
nn.MaxPool2d(2)
)
self.fc = nn.Sequential(
nn.Linear(in_features=(n_f * 4) * 4 * 4, out_features=n_f * 8),
nn.LeakyReLU(negative_slope=leak),
nn.Linear(in_features=n_f * 8, out_features=1)
)
def forward(self, x):
conv_out = self.convs(x).view(x.shape[0], -1)
return self.fc(conv_out).squeeze()
# structure of non-local block (from Non-Local Neural Networks https://arxiv.org/abs/1711.07971)
class NonLocalBlock(nn.Module):
def __init__(self, in_channels, sub_sample=True):
super(NonLocalBlock, self).__init__()
self.in_channels = in_channels
self.inter_channels = max(1, in_channels // 2)
self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, nn.MaxPool2d(kernel_size=(2, 2)))
self.phi = nn.Sequential(self.phi, nn.MaxPool2d(kernel_size=(2, 2)))
def forward(self, x):
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = t.matmul(theta_x, phi_x)
f_div_c = F.softmax(f, dim=-1)
y = t.matmul(f_div_c, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
w_y = self.W(y)
z = w_y + x
return z
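# Illustrative shape check (sketch, not part of the original file). With the
# default constructor arguments, VanillaNet and NonlocalNet both expect 32x32
# RGB inputs (NonlocalNet's fully connected head assumes a 4x4 spatial map
# after its three 2x poolings):
#   x = t.randn(8, 3, 32, 32)
#   VanillaNet()(x).shape    # torch.Size([8])
#   NonlocalNet()(x).shape   # torch.Size([8])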
|
the-stack_106_21417
|
from __future__ import print_function, division
import os
import time
import pwd
import datetime
import collections
import pprint
import math
import array
import json
import copy
import argparse
import uuid
import pdb
import numpy as np
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch(True)
def main(analysisDirectory, era, variable, verbose=False):
categoriesOfInterest = ['HT500_nMediumDeepJetB2_nJet4', 'HT500_nMediumDeepJetB2_nJet5', 'HT500_nMediumDeepJetB2_nJet6',
'HT500_nMediumDeepJetB2_nJet7', 'HT500_nMediumDeepJetB2_nJet8+',
'HT500_nMediumDeepJetB3_nJet4', 'HT500_nMediumDeepJetB3_nJet5', 'HT500_nMediumDeepJetB3_nJet6',
'HT500_nMediumDeepJetB3_nJet7', 'HT500_nMediumDeepJetB3_nJet8+',
'HT500_nMediumDeepJetB4+_nJet4', 'HT500_nMediumDeepJetB4+_nJet5', 'HT500_nMediumDeepJetB4+_nJet6',
'HT500_nMediumDeepJetB4+_nJet7', 'HT500_nMediumDeepJetB4+_nJet8+',
]
    histogramFile = "$ADIR/Combine/All/$ERA___Combined.root".replace("$ADIR", analysisDirectory).replace("$ERA", era).replace("//", "/")
f = ROOT.TFile.Open(histogramFile, "read")
keys = [k.GetName() for k in f.GetListOfKeys()]
if len(keys[0].split("___")) == 7: #format post-fill-(histograms/combine)
eras = sorted(list(set([k.split("___")[0] for k in keys])))
samples = sorted(list(set([k.split("___")[1] for k in keys])))
channels = sorted(list(set([k.split("___")[2] for k in keys])))
channelWindows = list(set(["___".join(k.split("___")[2:4]) for k in keys]))
categories = sorted(sorted(list(set([k.split("___")[4] for k in keys])), key=lambda j : j.split("nJet")[-1]), key=lambda j: j.split("nMediumDeep")[-1])
variables = list(set([k.split("___")[5] for k in keys]))
systematics = list(set([k.split("___")[6] for k in keys]))
else:
raise RuntimeError("Unhandled histogram key format: length: {} key: {}".format(len(keys[0].split("___")), keys[0]))
supportedTypes = (ROOT.TH1, ROOT.TH2, ROOT.TH3)
if verbose:
print("Eras: {}".format(eras))
print("Samples: {}".format(samples))
print("Channels: {}".format(channels))
print("Channel + Z Windows: {}".format(channelWindows))
print("Categories: {}".format(categories))
print("Variables: {}".format(variables))
print("Systematics: {}".format(systematics))
merge = dict()
for mera in eras:
merge[mera] = dict()
for msample in samples:
merge[mera][msample] = dict()
for mvariable in variables:
if mvariable not in [variable, variable + "Unweighted"]:
continue
merge[mera][msample][mvariable] = dict()
for msyst in systematics:
merge[mera][msample][mvariable][msyst] = dict()
for mcategory in ["nMediumDeepJetB2", "nMediumDeepJetB3", "nMediumDeepJetB4+"]:
merge[mera][msample][mvariable][msyst][mcategory] = []
for key in keys:
mera, msample, mchannel, mwindow, mcategory, mvariable, msyst = key.split("___")
if mvariable not in [variable, variable + "Unweighted"] or mera != era:
continue
mcat = mcategory.split("_")[-2].replace("BLIND", "")
merge[mera][msample][mvariable][msyst][mcat].append(key)
outputHistogramFile = histogramFile.replace("Combined", "").replace(".root", "MergedChannelsJets_" + variable + ".root")
print("Opening {}".format(outputHistogramFile))
of = ROOT.TFile.Open(outputHistogramFile, "recreate")
of.cd()
for mera, submerge in merge.items():
for msample, subsubmerge in submerge.items():
print("Writing results for {}".format(msample))
for mvariable, subsubsubmerge in subsubmerge.items():
for msyst, subsubsubsubmerge in subsubsubmerge.items():
print("*", end="")
for mcat, subsubsubsubsubmerge in subsubsubsubmerge.items():
mergeName = "___".join([mera, msample, "All", "ZWindow", "MergedChannelsJets_" + mcat, mvariable, msyst])
hist = None
blind = len([hk for hk in subsubsubsubsubmerge if "blind" in hk.lower()]) > 0
for histKey in subsubsubsubsubmerge:
rootobj = f.Get(histKey)
if isinstance(rootobj, supportedTypes):
if hist is None:
hist = rootobj.Clone(mergeName)
else:
hist.Add(rootobj)
if blind:
for bin in range(hist.GetNbinsX() + 2):
hist.SetBinContent(bin, 0)
hist.SetBinError(bin, 0)
if hist is not None:
                            hist.Write()
print("")
f.Close()
of.Close()
print("Done")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merge Jet categories and decay channels for the purpose of btag-dependent systematic studies')
parser.add_argument('--analysisDirectory', dest='analysisDirectory', action='store', type=str, default="/eos/user/$U/$USER/analysis/$DATE", required=True,
                        help='analysis directory path; $USER, $U, and $DATE placeholders are expanded')
parser.add_argument('--era', dest='era', type=str, default="2017", required=True,
help='era for plotting, which deduces the lumi only for now')
parser.add_argument('--variable', dest='variable', type=str, default="HT", required=True,
help='Variable to be merged across channels and BTag categories')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Enable more verbose output during actions')
#Parse the arguments
args = parser.parse_args()
uname = pwd.getpwuid(os.getuid()).pw_name
uinitial = uname[0]
dateToday = datetime.date.today().strftime("%b-%d-%Y")
analysisDir = args.analysisDirectory.replace("$USER", uname).replace("$U", uinitial).replace("$DATE", dateToday)
verbose = args.verbose
main(analysisDir, era=args.era, variable=args.variable, verbose=verbose)
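# Illustrative invocation (sketch; the script name and paths below are placeholders,
# not taken from the original file):
#   python mergeChannelsJets.py --analysisDirectory /eos/user/u/uname/analysis/Jan-01-2020 \
#       --era 2017 --variable HT --verbose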
|
the-stack_106_21420
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import time as _time
import datetime as _datetime
import requests as _requests
import pandas as _pd
import numpy as _np
try:
from urllib.parse import quote as urlencode
except ImportError:
from urllib import quote as urlencode
from . import utils
# import json as _json
# import re as _re
# import sys as _sys
from . import shared
class TickerBase():
def __init__(self, ticker):
self.ticker = ticker.upper()
self._history = None
self._base_url = 'https://query1.finance.yahoo.com'
self._scrape_url = 'https://finance.yahoo.com/quote'
self._fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._major_holders = None
self._institutional_holders = None
self._mutualfund_holders = None
self._isin = None
self._calendar = None
self._expirations = {}
self._earnings = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._financials = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._balancesheet = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._cashflow = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
def history(self, period="1mo", interval="1d",
start=None, end=None, prepost=False, actions=True,
auto_adjust=True, back_adjust=False,
proxy=None, rounding=False, tz=None, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
                Intraday data cannot extend past the last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or _datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is True
back_adjust: bool
Back-adjusted data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Round values to 2 decimal places?
Optional. Default is False = precision suggested by Yahoo!
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
if start or period is None or period.lower() == "max":
if start is None:
start = -2208988800
elif isinstance(start, _datetime.datetime):
start = int(_time.mktime(start.timetuple()))
else:
start = int(_time.mktime(
_time.strptime(str(start), '%Y-%m-%d')))
if end is None:
end = int(_time.time())
elif isinstance(end, _datetime.datetime):
end = int(_time.mktime(end.timetuple()))
else:
end = int(_time.mktime(_time.strptime(str(end), '%Y-%m-%d')))
params = {"period1": start, "period2": end}
else:
period = period.lower()
params = {"range": period}
params["interval"] = interval.lower()
params["includePrePost"] = prepost
params["events"] = "div,splits"
        # 1) fix weird bug with Yahoo! - returning 60m for 30m bars
if params["interval"] == "30m":
params["interval"] = "15m"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self._base_url, self.ticker)
data = _requests.get(url=url, params=params, proxies=proxy)
if "Will be right back" in data.text:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
"Our engineers are working quickly to resolve "
"the issue. Thank you for your patience.")
data = data.json()
# Work with errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range, symbol may be delisted"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
elif "chart" not in data or data["chart"]["result"] is None or \
not data["chart"]["result"]:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# parse quotes
try:
quotes = utils.parse_quotes(data["chart"]["result"][0], tz)
except Exception:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
        # 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = _pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
                quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
if auto_adjust:
quotes = utils.auto_adjust(quotes)
elif back_adjust:
quotes = utils.back_adjust(quotes)
if rounding:
quotes = _np.round(quotes, data[
"chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(_np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = utils.parse_actions(data["chart"]["result"][0], tz)
# combine
df = _pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
if params["interval"][-1] == "m":
df.index.name = "Datetime"
else:
df.index = _pd.to_datetime(df.index.date)
if tz is not None:
df.index = df.index.tz_localize(tz)
df.index.name = "Date"
self._history = df.copy()
if not actions:
df.drop(columns=["Dividends", "Stock Splits"], inplace=True)
return df
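    # Illustrative call of history() (sketch, not part of the original file; the
    # ticker symbol is arbitrary and the call requires network access to Yahoo!):
    #   TickerBase("MSFT").history(period="1mo", interval="1d")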
# ------------------------
def _get_fundamentals(self, kind=None, proxy=None):
def cleanup(data):
df = _pd.DataFrame(data)
if 'maxAge' in df:
df = df.drop(columns=['maxAge'])
for col in df.columns:
df[col] = _np.where(
df[col].astype(str) == '-', _np.nan, df[col])
if 'endDate' in df:
df.set_index('endDate', inplace=True)
try:
df.index = _pd.to_datetime(df.index, unit='s')
except ValueError:
df.index = _pd.to_datetime(df.index)
df = df.T
df.columns.name = ''
df.index.name = 'Breakdown'
df.index = utils.camel2title(df.index)
return df
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
if self._fundamentals:
return
# get info and sustainability
url = '%s/%s' % (self._scrape_url, self.ticker)
data = utils.get_json(url, proxy)
# holders
url = "{}/{}".format(self._scrape_url, self.ticker)
holders = _pd.read_html(url+'/holders')
if len(holders)>=3:
self._major_holders = holders[0]
self._institutional_holders = holders[1]
self._mutualfund_holders = holders[2]
elif len(holders)>=2:
self._major_holders = holders[0]
self._institutional_holders = holders[1]
else:
self._major_holders = holders[0]
#self._major_holders = holders[0]
#self._institutional_holders = holders[1]
if self._institutional_holders is not None:
if 'Date Reported' in self._institutional_holders:
self._institutional_holders['Date Reported'] = _pd.to_datetime(
self._institutional_holders['Date Reported'])
if '% Out' in self._institutional_holders:
self._institutional_holders['% Out'] = self._institutional_holders[
'% Out'].str.replace('%', '').astype(float)/100
if self._mutualfund_holders is not None:
if 'Date Reported' in self._mutualfund_holders:
self._mutualfund_holders['Date Reported'] = _pd.to_datetime(
self._mutualfund_holders['Date Reported'])
if '% Out' in self._mutualfund_holders:
self._mutualfund_holders['% Out'] = self._mutualfund_holders[
'% Out'].str.replace('%', '').astype(float)/100
# sustainability
d = {}
if isinstance(data.get('esgScores'), dict):
for item in data['esgScores']:
if not isinstance(data['esgScores'][item], (dict, list)):
d[item] = data['esgScores'][item]
s = _pd.DataFrame(index=[0], data=d)[-1:].T
s.columns = ['Value']
if(len(s[s.index == 'ratingYear']['Value'].values) > 0 and len(s[s.index == 'ratingMonth']['Value'].values) > 0 ):
s.index.name = '%.f-%.f' % (s[s.index == 'ratingYear']['Value'].values[0], s[s.index == 'ratingMonth']['Value'].values[0])
else:
s.index.name = '%.f-%.f' % (0, 0)
self._sustainability = s[~s.index.isin(
['maxAge', 'ratingYear', 'ratingMonth'])]
# info (be nice to python 2)
self._info = {}
items = ['summaryProfile', 'summaryDetail', 'quoteType',
'defaultKeyStatistics', 'assetProfile', 'summaryDetail']
for item in items:
if isinstance(data.get(item), dict):
self._info.update(data[item])
if 'regularMarketOpen' in self._info:
self._info['regularMarketPrice'] = self._info['regularMarketOpen']
else:
self._info['regularMarketPrice'] = ''
self._info['logo_url'] = ""
try:
domain = self._info['website'].split(
'://')[1].split('/')[0].replace('www.', '')
self._info['logo_url'] = 'https://logo.clearbit.com/%s' % domain
except Exception:
pass
# events
try:
cal = _pd.DataFrame(
data['calendarEvents']['earnings'])
cal['earningsDate'] = _pd.to_datetime(
cal['earningsDate'], unit='s')
self._calendar = cal.T
self._calendar.index = utils.camel2title(self._calendar.index)
self._calendar.columns = ['Value']
except Exception:
pass
# analyst recommendations
try:
rec = _pd.DataFrame(
data['upgradeDowngradeHistory']['history'])
rec['earningsDate'] = _pd.to_datetime(
rec['epochGradeDate'], unit='s')
rec.set_index('earningsDate', inplace=True)
rec.index.name = 'Date'
rec.columns = utils.camel2title(rec.columns)
self._recommendations = rec[[
'Firm', 'To Grade', 'From Grade', 'Action']].sort_index()
except Exception:
pass
# get fundamentals
data = utils.get_json(url+'/financials', proxy)
# generic patterns
for key in (
(self._cashflow, 'cashflowStatement', 'cashflowStatements'),
(self._balancesheet, 'balanceSheet', 'balanceSheetStatements'),
(self._financials, 'incomeStatement', 'incomeStatementHistory')
):
item = key[1] + 'History'
if isinstance(data.get(item), dict):
key[0]['yearly'] = cleanup(data[item][key[2]])
item = key[1]+'HistoryQuarterly'
if isinstance(data.get(item), dict):
key[0]['quarterly'] = cleanup(data[item][key[2]])
# earnings
if isinstance(data.get('earnings'), dict):
earnings = data['earnings']['financialsChart']
df = _pd.DataFrame(earnings['yearly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Year'
self._earnings['yearly'] = df
df = _pd.DataFrame(earnings['quarterly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Quarter'
self._earnings['quarterly'] = df
self._fundamentals = True
def get_recommendations(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._recommendations
if as_dict:
return data.to_dict()
return data
def get_calendar(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._calendar
if as_dict:
return data.to_dict()
return data
def get_major_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._major_holders
if as_dict:
return data.to_dict()
return data
def get_institutional_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._institutional_holders
if data is not None:
if as_dict:
return data.to_dict()
return data
def get_mutualfund_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._mutualfund_holders
if data is not None:
if as_dict:
return data.to_dict()
return data
def get_info(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._info
if as_dict:
return data.to_dict()
return data
def get_sustainability(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._sustainability
if as_dict:
return data.to_dict()
return data
def get_earnings(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._earnings[freq]
if as_dict:
return data.to_dict()
return data
def get_financials(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._financials[freq]
if as_dict:
return data.to_dict()
return data
def get_balancesheet(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._balancesheet[freq]
if as_dict:
return data.to_dict()
return data
def get_balance_sheet(self, proxy=None, as_dict=False, freq="yearly"):
return self.get_balancesheet(proxy, as_dict, freq)
def get_cashflow(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._cashflow[freq]
if as_dict:
return data.to_dict()
return data
def get_dividends(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
dividends = self._history["Dividends"]
return dividends[dividends != 0]
def get_splits(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
splits = self._history["Stock Splits"]
return splits[splits != 0]
def get_actions(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
actions = self._history[["Dividends", "Stock Splits"]]
return actions[actions != 0].dropna(how='all').fillna(0)
def get_isin(self, proxy=None):
# *** experimental ***
if self._isin is not None:
return self._isin
ticker = self.ticker.upper()
if "-" in ticker or "^" in ticker:
self._isin = '-'
return self._isin
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
q = ticker
self.get_info(proxy=proxy)
if "shortName" in self._info:
q = self._info['shortName']
url = 'https://markets.businessinsider.com/ajax/' \
'SearchController_Suggest?max_results=25&query=%s' \
% urlencode(q)
data = _requests.get(url=url, proxies=proxy).text
search_str = '"{}|'.format(ticker)
if search_str not in data:
if q.lower() in data.lower():
                search_str = '"|'
if search_str not in data:
self._isin = '-'
return self._isin
else:
self._isin = '-'
return self._isin
self._isin = data.split(search_str)[1].split('"')[0].split('|')[0]
return self._isin
|
the-stack_106_21423
|
"""
functions to be imported into the presidents Flask app
"""
import csv
def raiseError(e):
error_text = "<p>The error:<br>" + str(e) + "</p>"
hed = '<h1>Something is broken.</h1>'
return hed + error_text
def convert_to_dict(filename):
"""
Convert a CSV file to a list of Python dictionaries
"""
# open a CSV file - note - must have column headings in top row
datafile = open(filename, newline='')
# create DictReader object
my_reader = csv.DictReader(datafile)
# create a regular Python list containing dicts
list_of_dicts = list(my_reader)
# close original csv file
datafile.close()
# return the list
return list_of_dicts
def make_ordinal(num):
"""
Create an ordinal (1st, 2nd, etc.) from a number.
"""
base = num % 10
if base in [0,4,5,6,7,8,9] or num in [11,12,13]:
ext = "th"
elif base == 1:
ext = "st"
elif base == 2:
ext = "nd"
else:
ext = "rd"
return str(num) + ext
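# Quick illustration of the ordinal rules above (sketch, not part of the original
# file; values are verifiable by hand):
#   make_ordinal(1) -> "1st", make_ordinal(2) -> "2nd", make_ordinal(3) -> "3rd"
#   make_ordinal(11) -> "11th", make_ordinal(22) -> "22nd"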
# tryouts
def test_make_ordinal():
for i in range(1,46):
print(make_ordinal(i))
def search_the_list(list):
for item in list:
if "Whig" in item['Party']:
print(item['President'] + " was a Whig.")
for k in list[0].keys():
print(k)
# retrieve all the names from the dataset and put them into a list
def get_names(source):
names = []
for row in source:
# lowercase all the names for better searching
name = row["Title"].lower()
names.append(name)
return sorted(names)
# find the row that matches the id in the URL, retrieve name and photo
def get_recipe(source, id):
for row in source:
if id == str( row["id"] ):
name = row["name"]
photo = row["photo"]
# change number to string
id = str(id)
# return these if id is valid
return id, name, photo
# return these if id is not valid - not a great solution, but simple
return "Unknown", "Unknown", ""
# find the row that matches the name in the form and retrieve matching id
def get_id(source, name):
for row in source:
# lower() makes the string all lowercase
if name.lower() in row["Title"].lower():
id = row["id"]
# change number to string
id = str(id)
# return id if name is valid
return id
# return these if id is not valid - not a great solution, but simple
return "Unknown"
# find the row that matches the name in the form and retrieve matching id
def get_ids_list(source, name):
output_list=[]
for row in source:
# lower() makes the string all lowercase
if name.lower() in row["Title"].lower():
id = row["id"]
# change number to string
id = str(id)
# return id if name is valid
output_list.append(id)
# return these if id is not valid - not a great solution, but simple
    return output_list
def get_recipe_titles_from_ids(source, ids_list):
titles_list=[]
for id_val in ids_list:
        title = list(filter(lambda r: r['id'] == id_val, source))[0]['Title']
        titles_list.append(title)
return titles_list
|
the-stack_106_21424
|
import numpy as np
from pycompss.api.api import compss_wait_on
from pycompss.api.parameter import COLLECTION_IN, COLLECTION_OUT, \
Type, Depth
from pycompss.api.task import task
from scipy.sparse import issparse
from scipy.sparse import vstack as vstack_sparse
from sklearn.base import BaseEstimator
from sklearn.utils import validation
from dislib.cluster.dbscan.classes import Region
from dislib.data.array import Array
class DBSCAN(BaseEstimator):
""" Perform DBSCAN clustering.
This algorithm requires data to be arranged in a multidimensional grid.
The fit method re-arranges input data before running the
clustering algorithm. See ``fit()`` for more details.
Parameters
----------
eps : float, optional (default=0.5)
The maximum distance between two samples for them to be considered as
in the same neighborhood.
min_samples : int, optional (default=5)
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
n_regions : int, optional (default=1)
Number of regions per dimension in which to divide the feature space.
The total number of regions generated is equal to ``n_regions`` ^
``len(dimensions)``.
dimensions : iterable, optional (default=None)
Integer indices of the dimensions of the feature space that should be
divided. If None, all dimensions are divided.
max_samples : int, optional (default=None)
        Setting max_samples to an integer results in the parallelization of
the computation of distances inside each region of the grid. That
is, each region is processed using various parallel tasks, where each
task finds the neighbours of max_samples samples.
This can be used to balance the load in scenarios where samples are not
evenly distributed in the feature space.
Attributes
----------
n_clusters : int
Number of clusters found. Accessing this member performs a
synchronization.
Examples
--------
>>> from dislib.cluster import DBSCAN
>>> import dislib as ds
>>> import numpy as np
>>> arr = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
>>> x = ds.array(arr, block_size=(2, 2))
>>> dbscan = DBSCAN(eps=3, min_samples=2)
>>> y = dbscan.fit_predict(x)
>>> print(y.collect())
"""
def __init__(self, eps=0.5, min_samples=5, n_regions=1,
dimensions=None, max_samples=None):
assert n_regions >= 1, \
"Number of regions must be greater or equal to 1."
self.eps = eps
self.min_samples = min_samples
self.n_regions = n_regions
self.dimensions = dimensions
self.max_samples = max_samples
def fit(self, x, y=None):
""" Perform DBSCAN clustering on x.
Samples are initially rearranged in a multidimensional grid with
``n_regions`` regions per dimension in ``dimensions``. All regions
in a specific dimension have the same size.
Parameters
----------
x : ds-array
Input data.
y : ignored
Not used, present here for API consistency by convention.
Returns
-------
self : DBSCAN
"""
assert self.n_regions >= 1, \
"Number of regions must be greater or equal to 1."
self._subset_sizes = []
self._sorting = []
self._components = None
self._labels = None
n_features = x.shape[1]
sparse = x._sparse
self._dimensions = self.dimensions
if self.dimensions is None:
self._dimensions = range(n_features)
n_dims = len(self._dimensions)
arranged_data, indices, sizes = _arrange_samples(x, self.n_regions,
self._dimensions)
grid = np.empty((self.n_regions,) * n_dims, dtype=object)
region_widths = self._compute_region_widths(x)[self._dimensions]
sizes = compss_wait_on(sizes)
# Create regions
for subset_idx, region_id in enumerate(np.ndindex(grid.shape)):
subset = arranged_data[subset_idx]
subset_size = sizes[subset_idx]
grid[region_id] = Region(region_id, subset, subset_size,
self.eps, sparse)
# Set region neighbours
distances = np.ceil(self.eps / region_widths)
for region_id in np.ndindex(grid.shape):
self._add_neighbours(grid[region_id], grid, distances)
# Run dbscan on each region
for region_id in np.ndindex(grid.shape):
region = grid[region_id]
region.partial_dbscan(self.min_samples, self.max_samples)
# Compute label equivalences between different regions
equiv_list = []
for region_id in np.ndindex(grid.shape):
equiv_list.append(grid[region_id].get_equivalences())
equivalences = _merge_dicts(*equiv_list)
# Compute connected components
self._components = _get_connected_components(equivalences)
# Update region labels according to equivalences
final_labels = []
for subset_idx, region_id in enumerate(np.ndindex(grid.shape)):
region = grid[region_id]
region.update_labels(self._components)
final_labels.append(region.labels)
label_blocks = _rearrange_labels(final_labels, indices, x._n_blocks[0])
self._labels = Array(blocks=label_blocks,
top_left_shape=(x._top_left_shape[0], 1),
reg_shape=(x._reg_shape[0], 1),
shape=(x._shape[0], 1), sparse=False)
return self
def fit_predict(self, x):
""" Perform DBSCAN clustering on dataset and return cluster labels
for x.
Parameters
----------
x : ds-array
Input data.
Returns
-------
y : ds-array, shape=(n_samples , 1)
Cluster labels.
"""
self.fit(x)
return self._labels
@staticmethod
def _add_neighbours(region, grid, distances):
for ind in np.ndindex(grid.shape):
if ind == region.id:
continue
d = np.abs(np.array(region.id) - np.array(ind))
if (d <= distances).all():
region.add_neighbour(grid[ind])
@property
def n_clusters(self):
validation.check_is_fitted(self, '_components')
self._components = compss_wait_on(self._components)
return len(self._components)
def _compute_region_widths(self, x):
mn = x.min().collect()
mx = x.max().collect()
if issparse(mn):
mn = mn.toarray()
mx = mx.toarray()
return ((mx - mn) / self.n_regions).reshape(-1, )
def _arrange_samples(x, n_regions, dimensions=None):
""" Arranges samples in an n-dimensional grid. The feature space is
divided in ``n_regions`` equally sized regions on each dimension based on
the maximum and minimum values of each feature in x.
Parameters
----------
x : ds-array
Input data.
n_regions : int
Number of regions per dimension in which to split the feature space.
dimensions : iterable, optional (default=None)
Integer indices of the dimensions to split. If None, all dimensions
are split.
Returns
-------
grid_data : list
A list of nd-arrays (futures) containing the samples on each region.
That is, grid_data[i][j] contains the samples in row block i of x
that lie in region j.
    indices : list of lists
        indices[i][j] contains the sample indices of the
samples from row block i that lie in region j. The indices
are relative to row block i.
sizes : list
Sizes (futures) of the arrays in grid_data.
"""
n_features = x.shape[1]
if dimensions is None:
dimensions = range(n_features)
grid_shape = (n_regions,) * len(dimensions)
# min() and max() calls have synchronization points
mn = x.min()
mx = x.max()
bins = _generate_bins(mn._blocks, mx._blocks, dimensions, n_regions)
total_regions = n_regions ** len(dimensions)
return _arrange_data(x, grid_shape, bins, dimensions, total_regions)
def _arrange_data(x, g_shape, bins, dims, total_regions):
reg_lists = list()
ind_lists = list()
for row in x._iterator(axis=0):
reg_list = [object() for _ in range(total_regions)]
ind_list = [object() for _ in range(total_regions)]
# after calling arrange_block, reg_list contains one nd-array per
# region with the corresponding samples, and ind_list contains
# the indices of the samples that go to each region
_arrange_block(row._blocks, bins, dims, g_shape, reg_list, ind_list)
reg_lists.append(reg_list)
ind_lists.append(ind_list)
    # the ith element of each list in reg_lists contains the samples of
    # the ith region.
reg_arr = np.asarray(reg_lists)
arranged_samples = list()
sizes = list()
for i in range(reg_arr.shape[1]):
# we merge together the ith element of each element in reg_arr and
# sort_arr to obtain a single nd-array per region (convert to list
# again because collections do not support np.arrays)
samples, size = _merge_samples(reg_arr[:, i].tolist(), x._sparse)
arranged_samples.append(samples)
sizes.append(size)
# arranged_samples is a list of nd-arrays (one per region) containing the
# samples in each region.
return arranged_samples, ind_lists, sizes
def _rearrange_labels(labels, indices, n_blocks):
"""
This method rearranges computed labels back to their original position.
"""
blocks_list = list()
for i, arr in enumerate(labels):
blocks = [object() for _ in range(n_blocks)]
# blocks_list[i][j] contains the labels of region i that belong to
# row block j in the original arrangement of the data
_rearrange_region(arr, np.asarray(indices)[:, i].tolist(), blocks)
blocks_list.append(blocks)
blocks_arr = np.asarray(blocks_list)
sorted_blocks = list()
# merge and sort the rearranged labels to build the final array of labels
for i in range(blocks_arr.shape[1]):
label_block = _merge_labels(blocks_arr[:, i].tolist(), indices[i])
sorted_blocks.append([label_block])
return sorted_blocks
@task(mn={Type: COLLECTION_IN, Depth: 2},
mx={Type: COLLECTION_IN, Depth: 2},
returns=1)
def _generate_bins(mn, mx, dimensions, n_regions):
bins = []
mn_arr = Array._merge_blocks(mn)[0]
mx_arr = Array._merge_blocks(mx)[0]
if issparse(mn_arr):
mn_arr = mn_arr.toarray()[0]
mx_arr = mx_arr.toarray()[0]
# create bins for the different regions in the grid in every dimension
for dim in dimensions:
bin_ = np.linspace(mn_arr[dim], mx_arr[dim], n_regions + 1)
bins.append(bin_)
return bins
@task(blocks={Type: COLLECTION_IN, Depth: 2},
samples_list={Type: COLLECTION_OUT},
indices={Type: COLLECTION_OUT})
def _arrange_block(blocks, bins, dimensions, shape, samples_list, indices):
x = Array._merge_blocks(blocks)
n_bins = shape[0]
region_indices = list()
# find the samples that belong to each region iterating over each dimension
for dim_bins, dim in zip(bins, dimensions):
col = x[:, dim]
if issparse(col):
col = col.toarray().flatten()
# digitize a dimension of all the samples into the corresponding bins
# region_idx represents the region index at dimension dim of each
# sample
region_idx = np.digitize(col, dim_bins) - 1
region_idx[region_idx >= n_bins] = n_bins - 1
region_indices.append(region_idx)
# idx_arr is an nd-array of shape (n_dimensions, n_samples), where each
# column represents the region indices of each sample (i.e., the region
# where the sample should go)
idx_arr = np.asarray(region_indices)
# apply np.ravel_multi_index to each column of idx_arr to get a 1-D index
# that represents each region in the output list
out_idx = np.apply_along_axis(np.ravel_multi_index, 0, idx_arr, dims=shape)
for i in range(len(samples_list)):
# insert all the samples that belong to a region to the corresponding
# place in the output list.
sample_indices = np.where(out_idx == i)
samples_list[i] = x[sample_indices]
# sorting contains which samples go to which region
indices[i] = sample_indices
@task(indices=COLLECTION_IN,
blocks=COLLECTION_OUT)
def _rearrange_region(labels, indices, blocks):
"""
indices[i] contains the label/sample indices of row block i (in the
original data) that lie in this region. This method
redistributes the labels into a list representing the row blocks
in the original data
"""
start, end = 0, 0
for i, ind in enumerate(indices):
end += len(ind[0])
blocks[i] = labels[start:end].reshape(-1, 1)
start = end
@task(samples_list={Type: COLLECTION_IN}, returns=2)
def _merge_samples(samples_list, sparse):
if sparse:
samples = vstack_sparse(samples_list)
else:
samples = np.vstack(samples_list)
return samples, samples.shape[0]
@task(labels_list=COLLECTION_IN, indices=COLLECTION_IN, returns=1)
def _merge_labels(labels_list, indices):
labels = np.vstack(labels_list)
# idx contains the original position of each label in labels
idx = np.hstack(np.asarray(indices).flatten())
return np.take(labels, idx).reshape(-1, 1)
@task(returns=1)
def _merge_dicts(*dicts):
merged_dict = {}
for dct in dicts:
merged_dict.update(dct)
return merged_dict
@task(returns=1)
def _get_connected_components(equiv):
# Add inverse equivalences
for node, neighs in equiv.items():
for neigh in neighs:
equiv[neigh].add(node)
visited = set()
connected = []
for node, neighbours in equiv.items():
if node in visited:
continue
connected.append([node])
_visit_neighbours(equiv, neighbours, visited, connected)
return connected
def _visit_neighbours(equiv, neighbours, visited, connected):
to_visit = list(neighbours)
while len(to_visit) > 0:
neighbour = to_visit.pop()
if neighbour in visited:
continue
visited.add(neighbour)
connected[-1].append(neighbour)
if neighbour in equiv:
to_visit.extend(equiv[neighbour])
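# Illustrative sketch (not part of dislib): the heart of _arrange_block above is
# binning each sample per dimension with np.digitize and flattening the
# per-dimension bin indices with np.ravel_multi_index into a single region id.
# The toy samples and the 2x2 grid below are invented for the demo.
def _grid_assignment_sketch():
    samples = np.array([[0.1, 0.2], [0.9, 0.1], [0.5, 0.8]])
    n_regions = 2
    bins = [np.linspace(0, 1, n_regions + 1), np.linspace(0, 1, n_regions + 1)]
    per_dim = []
    for dim, dim_bins in enumerate(bins):
        idx = np.digitize(samples[:, dim], dim_bins) - 1
        idx[idx >= n_regions] = n_regions - 1  # clamp samples sitting on the upper edge
        per_dim.append(idx)
    # region id of each sample in the flattened 2x2 grid -> array([0, 2, 3])
    return np.ravel_multi_index(np.asarray(per_dim), dims=(n_regions, n_regions))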
|
the-stack_106_21425
|
#!/usr/bin/env python3
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
def main(argv):
logging.basicConfig(level=logging.INFO)
if len(argv) == 0:
bootstrap_dir = os.path.dirname(os.path.abspath(__file__))
infra_dir = os.path.dirname(bootstrap_dir)
argv = [infra_dir]
for root in argv:
# This could take an argument, except gclient DEPS has no good way to pass
# us an argument, and gclient getcwd() is ../ from our .gclient file. :(
logging.debug("Cleaning orphaned *.pyc files from: %s" % root)
for (dirpath, _, filenames) in os.walk(root):
fnset = set(filenames)
for filename in filenames:
if filename.endswith(".pyc") and filename[:-1] not in fnset:
path = os.path.join(dirpath, filename)
logging.info("Deleting orphan *.pyc file: %s" % path)
os.remove(path)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
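# Illustrative sketch (not part of the original script): the orphan test above
# treats "foo.pyc" as orphaned when "foo.py" is missing from the same directory.
# The filename list below is invented for the demo.
def _orphan_check_sketch():
    filenames = ["a.py", "a.pyc", "b.pyc"]
    fnset = set(filenames)
    orphans = [f for f in filenames if f.endswith(".pyc") and f[:-1] not in fnset]
    return orphans  # -> ['b.pyc']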
|
the-stack_106_21426
|
# -*- coding: utf-8 -*-
# Copyright (2017-2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import json
from unittest import mock
# 3rd party libs
from flask_api import status
from hpOneView.exceptions import HPOneViewException
# Module libs
from oneview_redfish_toolkit.blueprints import service_root
from oneview_redfish_toolkit.tests.base_flask_test import BaseFlaskTest
from oneview_redfish_toolkit import util
class TestServiceRoot(BaseFlaskTest):
"""Tests from ServiceRoot blueprint"""
@classmethod
    def setUpClass(cls):
        super(TestServiceRoot, cls).setUpClass()
        cls.app.register_blueprint(
            service_root.service_root, url_prefix='/redfish/v1/')
@mock.patch.object(service_root, 'g')
def test_service_root_oneview_exception(self, g):
"""Tests ServiceROOT with an exception"""
e = HPOneViewException({
'errorCode': 'ANOTHER_ERROR',
'message': 'appliance error',
})
g.oneview_client.appliance_node_information.get_version.side_effect = e
response = self.client.get("/redfish/v1/")
self.assertEqual(
status.HTTP_500_INTERNAL_SERVER_ERROR,
response.status_code
)
self.assertEqual("application/json", response.mimetype)
@mock.patch.object(service_root, 'g')
def test_service_root_exception(self, g):
"""Tests ServiceROOT with an exception"""
e = HPOneViewException({
'errorCode': 'ANOTHER_ERROR',
'message': 'appliance error',
})
g.oneview_client.appliance_node_information.get_version.side_effect = e
response = self.client.get("/redfish/v1/")
self.assertEqual(
status.HTTP_500_INTERNAL_SERVER_ERROR,
response.status_code
)
self.assertEqual("application/json", response.mimetype)
@mock.patch.object(util, 'config')
@mock.patch.object(service_root, 'g')
def test_get_service_root(self, g, config_mock):
"""Tests ServiceRoot blueprint result against know value """
def side_effect(section, option):
if section == "redfish" and option == "authentication_mode":
return "conf"
else:
return util.config.get(section, option)
g.oneview_client.appliance_node_information.get_version.return_value = \
{'uuid': '00000000-0000-0000-0000-000000000000'}
config_mock.get.side_effect = side_effect
result = self.client.get("/redfish/v1/")
result = json.loads(result.data.decode("utf-8"))
with open(
'oneview_redfish_toolkit/mockups/redfish/ServiceRoot.json'
) as f:
service_root_mockup = json.load(f)
self.assertEqual(service_root_mockup, result)
|
the-stack_106_21427
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import re
import xlrd
import csv
import tldextract
import traceback
def csv_file_to_indicator_list(file_path, col_num, starting_row, auto_detect, default_type, type_col, limit, offset):
indicator_list = []
# TODO: add run on all columns functionality
line_index = 0
    with open(file_path, 'r', newline='') as csv_file:
# csv reader can fail when encountering a NULL byte (\0) - so we go through the file and take out the NUL bytes.
file_reader = csv.reader(line.replace('\0', '') for line in csv_file)
for row in file_reader:
if line_index >= starting_row + offset and len(row) != 0:
indicator = row[col_num]
indicator_type = get_indicator_type(indicator, auto_detect, default_type, file_type='csv',
type_col=type_col, csv_row=row)
if indicator_type is None:
continue
indicator_list.append({
"type": indicator_type,
"value": indicator
})
line_index = line_index + 1
if limit and len(indicator_list) == int(str(limit)):
break
return indicator_list
def xls_file_to_indicator_list(file_path, sheet_name, col_num, starting_row, auto_detect, default_type,
type_col, limit, offset):
indicator_list = []
# TODO: add run on all columns functionality
    xl_workbook = xlrd.open_workbook(file_path)
    if sheet_name and sheet_name != 'None':
        xl_sheet = xl_workbook.sheet_by_name(sheet_name)
    else:
        xl_sheet = xl_workbook.sheet_by_index(0)
for row_index in range(starting_row + offset, xl_sheet.nrows):
indicator = xl_sheet.cell(row_index, col_num).value
indicator_type = get_indicator_type(indicator, auto_detect, default_type, file_type='xls',
type_col=type_col, xl_sheet=xl_sheet, xl_row_index=row_index)
if indicator_type is None:
continue
indicator_list.append({
'type': indicator_type,
'value': indicator
})
if limit and len(indicator_list) == int(str(limit)):
break
return indicator_list
def txt_file_to_indicator_list(file_path, auto_detect, default_type, limit, offset):
with open(file_path, "r") as fp:
file_data = fp.read()
indicator_list = []
raw_splitted_data = re.split(r"\s|\n|\t|\"|\'|\,|\0", file_data)
indicator_index = 0
for indicator in raw_splitted_data:
# drop punctuation
if len(indicator) > 1:
while indicator[-1] in ".,?:;\\)}]/!\n\t\0\"" and len(indicator) > 1:
indicator = indicator[:-1]
while indicator[0] in ".,({[\n\t\"" and len(indicator) > 1:
indicator = indicator[1:]
indicator_type = get_indicator_type(indicator, auto_detect, default_type, file_type='text')
# indicator not recognized skip the word
if indicator_type is None:
continue
if indicator_type is not None and indicator_index < offset:
indicator_index = indicator_index + 1
continue
indicator_list.append({
'type': indicator_type,
'value': indicator
})
if limit and len(indicator_list) == int(str(limit)):
break
return indicator_list
def get_indicator_type(indicator_value, auto_detect, default_type, file_type, type_col=None, xl_sheet=None,
xl_row_index=0, csv_row=None):
"""Returns the indicator type for the given file type.
Args:
indicator_value (str): the indicator value
auto_detect (bool): whether or not to auto_detect the type
default_type (Any): the default type of the indicator (could be None or str)
file_type (str): 'text', 'xls' or 'csv'
type_col (Any): the column from which to fetch the indicator type in xls or csv files (could be None or int)
xl_sheet (Any): the xls sheet from which to fetch the indicator type (could be None or ~xlrd.sheet.Sheet)
xl_row_index (Any): the row number in the xls sheet from which to fetch the indicator type (could be None or int)
csv_row (Any): the csv row from which to fetch the indicator type (could be None or list)
Returns:
Any. returns None if indicator is not recognized in text file
or the indicator is not recognized and no default type given.
Otherwise will return a string indicating the indicator type
"""
indicator_type = detect_type(indicator_value)
# indicator not recognized skip the word in text file
if indicator_type is None and file_type == 'text':
return None
if not auto_detect:
indicator_type = default_type
if file_type != 'text':
if type_col is not None and file_type == 'xls':
indicator_type = xl_sheet.cell(xl_row_index, int(type_col) - 1).value
elif type_col is not None and file_type == 'csv':
indicator_type = csv_row[int(type_col) - 1]
# indicator not recognized in non text file
if indicator_type is None:
# no default value given
if default_type is None:
return None
else:
# default value given
indicator_type = default_type
return indicator_type
def detect_type(indicator):
"""Infer the type of the indicator.
Args:
indicator(str): The indicator whose type we want to check.
Returns:
str. The type of the indicator.
"""
if re.match(sha256Regex, indicator) or re.match(md5Regex, indicator) or re.match(sha1Regex, indicator):
return FeedIndicatorType.File
if re.match(ipv4cidrRegex, indicator):
return FeedIndicatorType.CIDR
if re.match(ipv6cidrRegex, indicator):
return FeedIndicatorType.IPv6CIDR
if re.match(ipv4Regex, indicator):
return FeedIndicatorType.IP
if re.match(ipv6Regex, indicator):
return FeedIndicatorType.IPv6
if re.match(urlRegex, indicator):
return FeedIndicatorType.URL
if re.match(emailRegex, indicator):
return FeedIndicatorType.Email
try:
        # we use the TLDExtract class to fetch all existing domain suffixes from the file mentioned below:
# https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat
# the suffix_list_urls=None is used to not make http calls using the extraction - avoiding SSL errors
if tldextract.TLDExtract(cache_file='https://raw.githubusercontent.com/publicsuffix'
'/list/master/public_suffix_list.dat',
suffix_list_urls=None).__call__(indicator).suffix:
if '*' in indicator:
return FeedIndicatorType.DomainGlob
return FeedIndicatorType.Domain
except Exception:
pass
return None
def fetch_indicators_from_file(args):
file = demisto.getFilePath(args.get('entry_id'))
file_path = file['path']
file_name = file['name']
auto_detect = True if args.get('auto_detect') == 'True' else False
default_type = args.get('default_type')
limit = args.get("limit")
# offset - refers to the indicator list itself -
    # let's say you have a list of 500 and you put a limit of 100 on your output -
# you can get the next 100 by putting an offset of 100.
offset = int(str(args.get("offset"))) if args.get('offset') else 0
# the below params are for Excel type files only.
sheet_name = args.get('sheet_name')
indicator_col_num = args.get('indicator_column_number')
indicator_type_col_num = args.get('indicator_type_column_number')
# starting_row is for excel files -
# from which row should I start reading the indicators, it is used to avoid table headers.
starting_row = args.get('starting_row')
if file_name.endswith('xls') or file_name.endswith('xlsx'):
indicator_list = xls_file_to_indicator_list(file_path, sheet_name, int(indicator_col_num) - 1,
int(starting_row) - 1, auto_detect, default_type,
indicator_type_col_num, limit, offset)
elif file_name.endswith('csv'):
indicator_list = csv_file_to_indicator_list(file_path, int(indicator_col_num) - 1, int(starting_row) - 1,
auto_detect, default_type, indicator_type_col_num, limit, offset)
else:
indicator_list = txt_file_to_indicator_list(file_path, auto_detect, default_type, limit, offset)
human_readable = tableToMarkdown(f"Indicators from {file_name}:", indicator_list,
headers=['value', 'type'], removeNull=True)
# Create indicators in demisto
errors = []
for indicator in indicator_list:
res = demisto.executeCommand("createNewIndicator", indicator)
if is_error(res[0]):
errors.append("Error creating indicator - {}".format(res[0]["Contents"]))
if errors:
return_error(json.dumps(errors, indent=4))
return human_readable, None, indicator_list
def main():
try:
return_outputs(*fetch_indicators_from_file(demisto.args()))
except Exception as ex:
return_error('Failed to execute Fetch Indicators From File. Error: {}'.format(str(ex)),
error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
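# Illustrative sketch (not part of the integration): the order of checks in
# detect_type above matters - file hashes and CIDRs are tested before plain IPs,
# and URLs before domains. The simplified regexes below are stand-ins for the
# CommonServerPython ones and only illustrate the precedence; they are not exact.
def _detect_type_sketch(value):
    checks = [
        ('File', r'^[0-9a-fA-F]{32}$|^[0-9a-fA-F]{40}$|^[0-9a-fA-F]{64}$'),
        ('CIDR', r'^\d{1,3}(\.\d{1,3}){3}/\d{1,2}$'),
        ('IP', r'^\d{1,3}(\.\d{1,3}){3}$'),
        ('URL', r'^https?://'),
        ('Email', r'^[^@\s]+@[^@\s]+\.[^@\s]+$'),
        ('Domain', r'^[\w-]+(\.[\w-]+)+$'),
    ]
    for name, pattern in checks:
        if re.match(pattern, value):
            return name
    return None
# e.g. _detect_type_sketch('10.0.0.0/8') -> 'CIDR', _detect_type_sketch('example.com') -> 'Domain'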
|
the-stack_106_21428
|
from __future__ import annotations
import asyncio
import email.utils
import functools
import http
import inspect
import logging
import socket
import warnings
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Generator,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
from ..connection import State
from ..datastructures import Headers, HeadersLike, MultipleValuesError
from ..exceptions import (
AbortHandshake,
InvalidHandshake,
InvalidHeader,
InvalidMessage,
InvalidOrigin,
InvalidUpgrade,
NegotiationError,
)
from ..extensions import Extension, ServerExtensionFactory
from ..extensions.permessage_deflate import enable_server_permessage_deflate
from ..headers import (
build_extension,
parse_extension,
parse_subprotocol,
validate_subprotocols,
)
from ..http import USER_AGENT
from ..typing import ExtensionHeader, LoggerLike, Origin, Subprotocol
from .compatibility import loop_if_py_lt_38
from .handshake import build_response, check_request
from .http import read_request
from .protocol import WebSocketCommonProtocol
__all__ = ["serve", "unix_serve", "WebSocketServerProtocol", "WebSocketServer"]
HeadersLikeOrCallable = Union[HeadersLike, Callable[[str, Headers], HeadersLike]]
HTTPResponse = Tuple[http.HTTPStatus, HeadersLike, bytes]
class WebSocketServerProtocol(WebSocketCommonProtocol):
"""
WebSocket server connection.
:class:`WebSocketServerProtocol` provides :meth:`recv` and :meth:`send`
coroutines for receiving and sending messages.
It supports asynchronous iteration to receive messages::
async for message in websocket:
await process(message)
The iterator exits normally when the connection is closed with close code
1000 (OK) or 1001 (going away). It raises
a :exc:`~websockets.exceptions.ConnectionClosedError` when the connection
is closed with any other code.
You may customize the opening handshake in a subclass by
overriding :meth:`process_request` or :meth:`select_subprotocol`.
Args:
ws_server: WebSocket server that created this connection.
See :func:`serve` for the documentation of ``ws_handler``, ``logger``, ``origins``,
``extensions``, ``subprotocols``, and ``extra_headers``.
See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
"""
is_client = False
side = "server"
def __init__(
self,
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated
],
ws_server: WebSocketServer,
*,
logger: Optional[LoggerLike] = None,
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
process_request: Optional[
Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]]
] = None,
select_subprotocol: Optional[
Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol]
] = None,
**kwargs: Any,
) -> None:
if logger is None:
logger = logging.getLogger("websockets.server")
super().__init__(logger=logger, **kwargs)
# For backwards compatibility with 6.0 or earlier.
if origins is not None and "" in origins:
warnings.warn("use None instead of '' in origins", DeprecationWarning)
origins = [None if origin == "" else origin for origin in origins]
# For backwards compatibility with 10.0 or earlier. Done here in
# addition to serve to trigger the deprecation warning on direct
# use of WebSocketServerProtocol.
self.ws_handler = remove_path_argument(ws_handler)
self.ws_server = ws_server
self.origins = origins
self.available_extensions = extensions
self.available_subprotocols = subprotocols
self.extra_headers = extra_headers
self._process_request = process_request
self._select_subprotocol = select_subprotocol
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""
Register connection and initialize a task to handle it.
"""
super().connection_made(transport)
# Register the connection with the server before creating the handler
# task. Registering at the beginning of the handler coroutine would
# create a race condition between the creation of the task, which
# schedules its execution, and the moment the handler starts running.
self.ws_server.register(self)
self.handler_task = self.loop.create_task(self.handler())
async def handler(self) -> None:
"""
Handle the lifecycle of a WebSocket connection.
Since this method doesn't have a caller able to handle exceptions, it
        attempts to log relevant ones and guarantees that the TCP connection is
closed before exiting.
"""
try:
try:
await self.handshake(
origins=self.origins,
available_extensions=self.available_extensions,
available_subprotocols=self.available_subprotocols,
extra_headers=self.extra_headers,
)
# Remove this branch when dropping support for Python < 3.8
# because CancelledError no longer inherits Exception.
except asyncio.CancelledError: # pragma: no cover
raise
except ConnectionError:
raise
except Exception as exc:
if isinstance(exc, AbortHandshake):
status, headers, body = exc.status, exc.headers, exc.body
elif isinstance(exc, InvalidOrigin):
if self.debug:
self.logger.debug("! invalid origin", exc_info=True)
status, headers, body = (
http.HTTPStatus.FORBIDDEN,
Headers(),
f"Failed to open a WebSocket connection: {exc}.\n".encode(),
)
elif isinstance(exc, InvalidUpgrade):
if self.debug:
self.logger.debug("! invalid upgrade", exc_info=True)
status, headers, body = (
http.HTTPStatus.UPGRADE_REQUIRED,
Headers([("Upgrade", "websocket")]),
(
f"Failed to open a WebSocket connection: {exc}.\n"
f"\n"
f"You cannot access a WebSocket server directly "
f"with a browser. You need a WebSocket client.\n"
).encode(),
)
elif isinstance(exc, InvalidHandshake):
if self.debug:
self.logger.debug("! invalid handshake", exc_info=True)
status, headers, body = (
http.HTTPStatus.BAD_REQUEST,
Headers(),
f"Failed to open a WebSocket connection: {exc}.\n".encode(),
)
else:
self.logger.error("opening handshake failed", exc_info=True)
status, headers, body = (
http.HTTPStatus.INTERNAL_SERVER_ERROR,
Headers(),
(
b"Failed to open a WebSocket connection.\n"
b"See server log for more information.\n"
),
)
headers.setdefault("Date", email.utils.formatdate(usegmt=True))
headers.setdefault("Server", USER_AGENT)
headers.setdefault("Content-Length", str(len(body)))
headers.setdefault("Content-Type", "text/plain")
headers.setdefault("Connection", "close")
self.write_http_response(status, headers, body)
self.logger.info(
"connection failed (%d %s)", status.value, status.phrase
)
await self.close_transport()
return
try:
await self.ws_handler(self)
except Exception:
self.logger.error("connection handler failed", exc_info=True)
if not self.closed:
self.fail_connection(1011)
raise
try:
await self.close()
except ConnectionError:
raise
except Exception:
self.logger.error("closing handshake failed", exc_info=True)
raise
except Exception:
# Last-ditch attempt to avoid leaking connections on errors.
try:
self.transport.close()
except Exception: # pragma: no cover
pass
finally:
# Unregister the connection with the server when the handler task
# terminates. Registration is tied to the lifecycle of the handler
# task because the server waits for tasks attached to registered
# connections before terminating.
self.ws_server.unregister(self)
self.logger.info("connection closed")
async def read_http_request(self) -> Tuple[str, Headers]:
"""
Read request line and headers from the HTTP request.
If the request contains a body, it may be read from ``self.reader``
after this coroutine returns.
Raises:
InvalidMessage: if the HTTP message is malformed or isn't an
HTTP/1.1 GET request.
"""
try:
path, headers = await read_request(self.reader)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as exc:
raise InvalidMessage("did not receive a valid HTTP request") from exc
if self.debug:
self.logger.debug("< GET %s HTTP/1.1", path)
for key, value in headers.raw_items():
self.logger.debug("< %s: %s", key, value)
self.path = path
self.request_headers = headers
return path, headers
def write_http_response(
self, status: http.HTTPStatus, headers: Headers, body: Optional[bytes] = None
) -> None:
"""
Write status line and headers to the HTTP response.
        This method is also able to write a response body.
"""
self.response_headers = headers
if self.debug:
self.logger.debug("> HTTP/1.1 %d %s", status.value, status.phrase)
for key, value in headers.raw_items():
self.logger.debug("> %s: %s", key, value)
if body is not None:
self.logger.debug("> [body] (%d bytes)", len(body))
# Since the status line and headers only contain ASCII characters,
# we can keep this simple.
response = f"HTTP/1.1 {status.value} {status.phrase}\r\n"
response += str(headers)
self.transport.write(response.encode())
if body is not None:
self.transport.write(body)
async def process_request(
self, path: str, request_headers: Headers
) -> Optional[HTTPResponse]:
"""
Intercept the HTTP request and return an HTTP response if appropriate.
You may override this method in a :class:`WebSocketServerProtocol`
subclass, for example:
        * to return an HTTP 200 OK response on a given path; then a load
          balancer can use this path for a health check;
        * to authenticate the request and return an HTTP 401 Unauthorized or an
          HTTP 403 Forbidden when authentication fails.
You may also override this method with the ``process_request``
argument of :func:`serve` and :class:`WebSocketServerProtocol`. This
is equivalent, except ``process_request`` won't have access to the
protocol instance, so it can't store information for later use.
:meth:`process_request` is expected to complete quickly. If it may run
for a long time, then it should await :meth:`wait_closed` and exit if
:meth:`wait_closed` completes, or else it could prevent the server
from shutting down.
Args:
path: request path, including optional query string.
request_headers: request headers.
Returns:
Optional[Tuple[http.HTTPStatus, HeadersLike, bytes]]: :obj:`None`
to continue the WebSocket handshake normally.
            An HTTP response, represented by a 3-tuple of the response status,
headers, and body, to abort the WebSocket handshake and return
that HTTP response instead.
"""
if self._process_request is not None:
response = self._process_request(path, request_headers)
if isinstance(response, Awaitable):
return await response
else:
# For backwards compatibility with 7.0.
warnings.warn(
"declare process_request as a coroutine", DeprecationWarning
)
return response
return None
@staticmethod
def process_origin(
headers: Headers, origins: Optional[Sequence[Optional[Origin]]] = None
) -> Optional[Origin]:
"""
Handle the Origin HTTP request header.
Args:
headers: request headers.
origins: optional list of acceptable origins.
Raises:
InvalidOrigin: if the origin isn't acceptable.
"""
# "The user agent MUST NOT include more than one Origin header field"
# per https://www.rfc-editor.org/rfc/rfc6454.html#section-7.3.
try:
origin = cast(Optional[Origin], headers.get("Origin"))
except MultipleValuesError as exc:
raise InvalidHeader("Origin", "more than one Origin header found") from exc
if origins is not None:
if origin not in origins:
raise InvalidOrigin(origin)
return origin
@staticmethod
def process_extensions(
headers: Headers,
available_extensions: Optional[Sequence[ServerExtensionFactory]],
) -> Tuple[Optional[str], List[Extension]]:
"""
Handle the Sec-WebSocket-Extensions HTTP request header.
Accept or reject each extension proposed in the client request.
Negotiate parameters for accepted extensions.
Return the Sec-WebSocket-Extensions HTTP response header and the list
of accepted extensions.
:rfc:`6455` leaves the rules up to the specification of each
        extension.
To provide this level of flexibility, for each extension proposed by
the client, we check for a match with each extension available in the
server configuration. If no match is found, the extension is ignored.
If several variants of the same extension are proposed by the client,
it may be accepted several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
This process doesn't allow the server to reorder extensions. It can
only select a subset of the extensions proposed by the client.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
Args:
headers: request headers.
extensions: optional list of supported extensions.
Raises:
InvalidHandshake: to abort the handshake with an HTTP 400 error.
"""
response_header_value: Optional[str] = None
extension_headers: List[ExtensionHeader] = []
accepted_extensions: List[Extension] = []
header_values = headers.get_all("Sec-WebSocket-Extensions")
if header_values and available_extensions:
parsed_header_values: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in header_values], []
)
for name, request_params in parsed_header_values:
for ext_factory in available_extensions:
# Skip non-matching extensions based on their name.
if ext_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
response_params, extension = ext_factory.process_request_params(
request_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
extension_headers.append((name, response_params))
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the client sent. The extension is declined.
# Serialize extension header.
if extension_headers:
response_header_value = build_extension(extension_headers)
return response_header_value, accepted_extensions
# Not @staticmethod because it calls self.select_subprotocol()
def process_subprotocol(
self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP request header.
Return Sec-WebSocket-Protocol HTTP response header, which is the same
as the selected subprotocol.
Args:
headers: request headers.
available_subprotocols: optional list of supported subprotocols.
Raises:
InvalidHandshake: to abort the handshake with an HTTP 400 error.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values and available_subprotocols:
parsed_header_values: List[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
subprotocol = self.select_subprotocol(
parsed_header_values, available_subprotocols
)
return subprotocol
def select_subprotocol(
self,
client_subprotocols: Sequence[Subprotocol],
server_subprotocols: Sequence[Subprotocol],
) -> Optional[Subprotocol]:
"""
Pick a subprotocol among those offered by the client.
If several subprotocols are supported by the client and the server,
the default implementation selects the preferred subprotocol by
giving equal value to the priorities of the client and the server.
If no subprotocol is supported by the client and the server, it
proceeds without a subprotocol.
This is unlikely to be the most useful implementation in practice.
Many servers providing a subprotocol will require that the client
uses that subprotocol. Such rules can be implemented in a subclass.
You may also override this method with the ``select_subprotocol``
argument of :func:`serve` and :class:`WebSocketServerProtocol`.
Args:
client_subprotocols: list of subprotocols offered by the client.
server_subprotocols: list of subprotocols available on the server.
Returns:
Optional[Subprotocol]: Selected subprotocol.
:obj:`None` to continue without a subprotocol.
"""
if self._select_subprotocol is not None:
return self._select_subprotocol(client_subprotocols, server_subprotocols)
subprotocols = set(client_subprotocols) & set(server_subprotocols)
if not subprotocols:
return None
priority = lambda p: (
client_subprotocols.index(p) + server_subprotocols.index(p)
)
return sorted(subprotocols, key=priority)[0]
async def handshake(
self,
origins: Optional[Sequence[Optional[Origin]]] = None,
available_extensions: Optional[Sequence[ServerExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
) -> str:
"""
Perform the server side of the opening handshake.
Args:
origins: list of acceptable values of the Origin HTTP header;
include :obj:`None` if the lack of an origin is acceptable.
extensions: list of supported extensions, in order in which they
should be tried.
subprotocols: list of supported subprotocols, in order of
decreasing preference.
extra_headers: arbitrary HTTP headers to add to the response when
the handshake succeeds.
Returns:
str: path of the URI of the request.
Raises:
InvalidHandshake: if the handshake fails.
"""
path, request_headers = await self.read_http_request()
# Hook for customizing request handling, for example checking
# authentication or treating some paths as plain HTTP endpoints.
early_response_awaitable = self.process_request(path, request_headers)
if isinstance(early_response_awaitable, Awaitable):
early_response = await early_response_awaitable
else:
# For backwards compatibility with 7.0.
warnings.warn("declare process_request as a coroutine", DeprecationWarning)
early_response = early_response_awaitable
# The connection may drop while process_request is running.
if self.state is State.CLOSED:
raise self.connection_closed_exc() # pragma: no cover
# Change the response to a 503 error if the server is shutting down.
if not self.ws_server.is_serving():
early_response = (
http.HTTPStatus.SERVICE_UNAVAILABLE,
[],
b"Server is shutting down.\n",
)
if early_response is not None:
raise AbortHandshake(*early_response)
key = check_request(request_headers)
self.origin = self.process_origin(request_headers, origins)
extensions_header, self.extensions = self.process_extensions(
request_headers, available_extensions
)
protocol_header = self.subprotocol = self.process_subprotocol(
request_headers, available_subprotocols
)
response_headers = Headers()
build_response(response_headers, key)
if extensions_header is not None:
response_headers["Sec-WebSocket-Extensions"] = extensions_header
if protocol_header is not None:
response_headers["Sec-WebSocket-Protocol"] = protocol_header
if callable(extra_headers):
extra_headers = extra_headers(path, self.request_headers)
if extra_headers is not None:
response_headers.update(extra_headers)
response_headers.setdefault("Date", email.utils.formatdate(usegmt=True))
response_headers.setdefault("Server", USER_AGENT)
self.write_http_response(http.HTTPStatus.SWITCHING_PROTOCOLS, response_headers)
self.logger.info("connection open")
self.connection_open()
return path
class WebSocketServer:
"""
WebSocket server returned by :func:`serve`.
This class provides the same interface as :class:`~asyncio.Server`,
notably the :meth:`~asyncio.Server.close`
and :meth:`~asyncio.Server.wait_closed` methods.
It keeps track of WebSocket connections in order to close them properly
when shutting down.
Args:
logger: logger for this server;
defaults to ``logging.getLogger("websockets.server")``;
see the :doc:`logging guide <../topics/logging>` for details.
"""
def __init__(self, logger: Optional[LoggerLike] = None):
if logger is None:
logger = logging.getLogger("websockets.server")
self.logger = logger
# Keep track of active connections.
self.websockets: Set[WebSocketServerProtocol] = set()
# Task responsible for closing the server and terminating connections.
self.close_task: Optional[asyncio.Task[None]] = None
# Completed when the server is closed and connections are terminated.
self.closed_waiter: asyncio.Future[None]
def wrap(self, server: asyncio.base_events.Server) -> None:
"""
Attach to a given :class:`~asyncio.Server`.
Since :meth:`~asyncio.loop.create_server` doesn't support injecting a
custom ``Server`` class, the easiest solution that doesn't rely on
private :mod:`asyncio` APIs is to:
- instantiate a :class:`WebSocketServer`
- give the protocol factory a reference to that instance
- call :meth:`~asyncio.loop.create_server` with the factory
- attach the resulting :class:`~asyncio.Server` with this method
"""
self.server = server
for sock in server.sockets:
if sock.family == socket.AF_INET:
name = "%s:%d" % sock.getsockname()
elif sock.family == socket.AF_INET6:
name = "[%s]:%d" % sock.getsockname()[:2]
elif sock.family == socket.AF_UNIX:
name = sock.getsockname()
# In the unlikely event that someone runs websockets over a
# protocol other than IP or Unix sockets, avoid crashing.
else: # pragma: no cover
name = str(sock.getsockname())
self.logger.info("server listening on %s", name)
# Initialized here because we need a reference to the event loop.
# This should be moved back to __init__ in Python 3.10.
self.closed_waiter = server.get_loop().create_future()
def register(self, protocol: WebSocketServerProtocol) -> None:
"""
Register a connection with this server.
"""
self.websockets.add(protocol)
def unregister(self, protocol: WebSocketServerProtocol) -> None:
"""
Unregister a connection with this server.
"""
self.websockets.remove(protocol)
def close(self) -> None:
"""
Close the server.
This method:
* closes the underlying :class:`~asyncio.Server`;
* rejects new WebSocket connections with an HTTP 503 (service
unavailable) error; this happens when the server accepted the TCP
connection but didn't complete the WebSocket opening handshake prior
to closing;
* closes open WebSocket connections with close code 1001 (going away).
:meth:`close` is idempotent.
"""
if self.close_task is None:
self.close_task = self.get_loop().create_task(self._close())
async def _close(self) -> None:
"""
Implementation of :meth:`close`.
This calls :meth:`~asyncio.Server.close` on the underlying
:class:`~asyncio.Server` object to stop accepting new connections and
then closes open connections with close code 1001.
"""
self.logger.info("server closing")
# Stop accepting new connections.
self.server.close()
# Wait until self.server.close() completes.
await self.server.wait_closed()
# Wait until all accepted connections reach connection_made() and call
# register(). See https://bugs.python.org/issue34852 for details.
await asyncio.sleep(0, **loop_if_py_lt_38(self.get_loop()))
# Close OPEN connections with status code 1001. Since the server was
# closed, handshake() closes OPENING connections with a HTTP 503
# error. Wait until all connections are closed.
# asyncio.wait doesn't accept an empty first argument
if self.websockets:
await asyncio.wait(
[
asyncio.create_task(websocket.close(1001))
for websocket in self.websockets
],
**loop_if_py_lt_38(self.get_loop()),
)
# Wait until all connection handlers are complete.
# asyncio.wait doesn't accept an empty first argument.
if self.websockets:
await asyncio.wait(
[websocket.handler_task for websocket in self.websockets],
**loop_if_py_lt_38(self.get_loop()),
)
# Tell wait_closed() to return.
self.closed_waiter.set_result(None)
self.logger.info("server closed")
async def wait_closed(self) -> None:
"""
Wait until the server is closed.
When :meth:`wait_closed` returns, all TCP connections are closed and
all connection handlers have returned.
To ensure a fast shutdown, a connection handler should always be
awaiting at least one of:
* :meth:`~WebSocketServerProtocol.recv`: when the connection is closed,
it raises :exc:`~websockets.exceptions.ConnectionClosedOK`;
* :meth:`~WebSocketServerProtocol.wait_closed`: when the connection is
closed, it returns.
Then the connection handler is immediately notified of the shutdown;
it can clean up and exit.
"""
await asyncio.shield(self.closed_waiter)
def get_loop(self) -> asyncio.AbstractEventLoop:
"""
See :meth:`asyncio.Server.get_loop`.
"""
return self.server.get_loop()
def is_serving(self) -> bool:
"""
See :meth:`asyncio.Server.is_serving`.
"""
return self.server.is_serving()
async def start_serving(self) -> None:
"""
See :meth:`asyncio.Server.start_serving`.
"""
await self.server.start_serving() # pragma: no cover
async def serve_forever(self) -> None:
"""
See :meth:`asyncio.Server.serve_forever`.
"""
await self.server.serve_forever() # pragma: no cover
@property
def sockets(self) -> Iterable[socket.socket]:
"""
See :attr:`asyncio.Server.sockets`.
"""
return self.server.sockets
async def __aenter__(self) -> WebSocketServer:
return self # pragma: no cover
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.close() # pragma: no cover
await self.wait_closed() # pragma: no cover
class Serve:
"""
Start a WebSocket server listening on ``host`` and ``port``.
Whenever a client connects, the server creates a
:class:`WebSocketServerProtocol`, performs the opening handshake, and
delegates to the connection handler, ``ws_handler``.
The handler receives the :class:`WebSocketServerProtocol` and uses it to
send and receive messages.
Once the handler completes, either normally or with an exception, the
server performs the closing handshake and closes the connection.
Awaiting :func:`serve` yields a :class:`WebSocketServer`. This object
provides :meth:`~WebSocketServer.close` and
:meth:`~WebSocketServer.wait_closed` methods for shutting down the server.
:func:`serve` can be used as an asynchronous context manager::
stop = asyncio.Future() # set this future to exit the server
async with serve(...):
await stop
The server is shut down automatically when exiting the context.
Args:
        ws_handler: connection handler. It receives the WebSocket connection,
            which is a :class:`WebSocketServerProtocol`, as its argument.
host: network interfaces the server is bound to;
see :meth:`~asyncio.loop.create_server` for details.
port: TCP port the server listens on;
see :meth:`~asyncio.loop.create_server` for details.
create_protocol: factory for the :class:`asyncio.Protocol` managing
the connection; defaults to :class:`WebSocketServerProtocol`; may
be set to a wrapper or a subclass to customize connection handling.
logger: logger for this server;
defaults to ``logging.getLogger("websockets.server")``;
see the :doc:`logging guide <../topics/logging>` for details.
compression: shortcut that enables the "permessage-deflate" extension
by default; may be set to :obj:`None` to disable compression;
see the :doc:`compression guide <../topics/compression>` for details.
origins: acceptable values of the ``Origin`` header; include
:obj:`None` in the list if the lack of an origin is acceptable.
This is useful for defending against Cross-Site WebSocket
Hijacking attacks.
extensions: list of supported extensions, in order in which they
should be tried.
subprotocols: list of supported subprotocols, in order of decreasing
preference.
extra_headers (Union[HeadersLike, Callable[[str, Headers], HeadersLike]]):
arbitrary HTTP headers to add to the request; this can be
a :data:`~websockets.datastructures.HeadersLike` or a callable
taking the request path and headers in arguments and returning
a :data:`~websockets.datastructures.HeadersLike`.
process_request (Optional[Callable[[str, Headers], \
Awaitable[Optional[Tuple[http.HTTPStatus, HeadersLike, bytes]]]]]):
intercept HTTP request before the opening handshake;
see :meth:`~WebSocketServerProtocol.process_request` for details.
select_subprotocol: select a subprotocol supported by the client;
see :meth:`~WebSocketServerProtocol.select_subprotocol` for details.
See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
    Any other keyword arguments are passed to the event loop's
:meth:`~asyncio.loop.create_server` method.
For example:
* You can set ``ssl`` to a :class:`~ssl.SSLContext` to enable TLS.
* You can set ``sock`` to a :obj:`~socket.socket` that you created
outside of websockets.
Returns:
WebSocketServer: WebSocket server.
"""
def __init__(
self,
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated
],
host: Optional[Union[str, Sequence[str]]] = None,
port: Optional[int] = None,
*,
create_protocol: Optional[Callable[[Any], WebSocketServerProtocol]] = None,
logger: Optional[LoggerLike] = None,
compression: Optional[str] = "deflate",
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
process_request: Optional[
Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]]
] = None,
select_subprotocol: Optional[
Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol]
] = None,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: Optional[float] = None,
max_size: Optional[int] = 2**20,
max_queue: Optional[int] = 2**5,
read_limit: int = 2**16,
write_limit: int = 2**16,
**kwargs: Any,
) -> None:
# Backwards compatibility: close_timeout used to be called timeout.
timeout: Optional[float] = kwargs.pop("timeout", None)
if timeout is None:
timeout = 10
else:
warnings.warn("rename timeout to close_timeout", DeprecationWarning)
# If both are specified, timeout is ignored.
if close_timeout is None:
close_timeout = timeout
# Backwards compatibility: create_protocol used to be called klass.
klass: Optional[Type[WebSocketServerProtocol]] = kwargs.pop("klass", None)
if klass is None:
klass = WebSocketServerProtocol
else:
warnings.warn("rename klass to create_protocol", DeprecationWarning)
# If both are specified, klass is ignored.
if create_protocol is None:
create_protocol = klass
# Backwards compatibility: recv() used to return None on closed connections
legacy_recv: bool = kwargs.pop("legacy_recv", False)
# Backwards compatibility: the loop parameter used to be supported.
_loop: Optional[asyncio.AbstractEventLoop] = kwargs.pop("loop", None)
if _loop is None:
loop = asyncio.get_event_loop()
else:
loop = _loop
warnings.warn("remove loop argument", DeprecationWarning)
ws_server = WebSocketServer(logger=logger)
secure = kwargs.get("ssl") is not None
if compression == "deflate":
extensions = enable_server_permessage_deflate(extensions)
elif compression is not None:
raise ValueError(f"unsupported compression: {compression}")
if subprotocols is not None:
validate_subprotocols(subprotocols)
factory = functools.partial(
create_protocol,
# For backwards compatibility with 10.0 or earlier. Done here in
# addition to WebSocketServerProtocol to trigger the deprecation
# warning once per serve() call rather than once per connection.
remove_path_argument(ws_handler),
ws_server,
host=host,
port=port,
secure=secure,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
max_size=max_size,
max_queue=max_queue,
read_limit=read_limit,
write_limit=write_limit,
loop=_loop,
legacy_recv=legacy_recv,
origins=origins,
extensions=extensions,
subprotocols=subprotocols,
extra_headers=extra_headers,
process_request=process_request,
select_subprotocol=select_subprotocol,
logger=logger,
)
if kwargs.pop("unix", False):
path: Optional[str] = kwargs.pop("path", None)
# unix_serve(path) must not specify host and port parameters.
assert host is None and port is None
create_server = functools.partial(
loop.create_unix_server, factory, path, **kwargs
)
else:
create_server = functools.partial(
loop.create_server, factory, host, port, **kwargs
)
# This is a coroutine function.
self._create_server = create_server
self.ws_server = ws_server
# async with serve(...)
async def __aenter__(self) -> WebSocketServer:
return await self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.ws_server.close()
await self.ws_server.wait_closed()
# await serve(...)
def __await__(self) -> Generator[Any, None, WebSocketServer]:
# Create a suitable iterator by calling __await__ on a coroutine.
return self.__await_impl__().__await__()
async def __await_impl__(self) -> WebSocketServer:
server = await self._create_server()
self.ws_server.wrap(server)
return self.ws_server
# yield from serve(...) - remove when dropping Python < 3.10
__iter__ = __await__
serve = Serve
def unix_serve(
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated
],
path: Optional[str] = None,
**kwargs: Any,
) -> Serve:
"""
Similar to :func:`serve`, but for listening on Unix sockets.
This function builds upon the event
loop's :meth:`~asyncio.loop.create_unix_server` method.
It is only available on Unix.
It's useful for deploying a server behind a reverse proxy such as nginx.
Args:
path: file system path to the Unix socket.
"""
return serve(ws_handler, path=path, unix=True, **kwargs)
def remove_path_argument(
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]],
]
) -> Callable[[WebSocketServerProtocol], Awaitable[Any]]:
try:
inspect.signature(ws_handler).bind(None)
except TypeError:
try:
inspect.signature(ws_handler).bind(None, "")
except TypeError: # pragma: no cover
# ws_handler accepts neither one nor two arguments; leave it alone.
pass
else:
# ws_handler accepts two arguments; activate backwards compatibility.
# Enable deprecation warning and announce deprecation in 11.0.
# warnings.warn("remove second argument of ws_handler", DeprecationWarning)
async def _ws_handler(websocket: WebSocketServerProtocol) -> Any:
return await cast(
Callable[[WebSocketServerProtocol, str], Awaitable[Any]],
ws_handler,
)(websocket, websocket.path)
return _ws_handler
return cast(
Callable[[WebSocketServerProtocol], Awaitable[Any]],
ws_handler,
)
|
the-stack_106_21430
|
from typing import List
class Solution:
    def restoreString(self, s: str, indices: List[int]) -> str:
        # Pair each character with its target index, then emit the characters
        # in target-index order.
        pair_list = []
        output = ""
        for x in range(0, len(s)):
            pair_list.append([s[x], indices[x]])
        for x in sorted(pair_list, key=lambda x: x[1]):
            output += x[0]
        return output
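# A minimal usage sketch (hedged): the input string and indices below are
# illustrative; indices[i] gives the target position of s[i].
if __name__ == "__main__":
    print(Solution().restoreString("codeleet", [4, 5, 6, 7, 0, 2, 1, 3]))  # -> "leetcode"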
|
the-stack_106_21432
|
"""Common framework for loading yaml files."""
import yaml
import os
import os.path
from typing import Optional
from jinja2.sandbox import SandboxedEnvironment
class YamlFileObject:
"""Common class for a yaml file object."""
default_file_name = "default.yaml"
templated = True
@classmethod
def from_dict(cls, config, **kwargs):
"""Load a schedule from a dict."""
        raise NotImplementedError(
            "from_dict method not overridden for {0}".format(cls.__name__)
        )
@staticmethod
def env_var(var: str, default: Optional[str] = None) -> str:
"""The env_var() function. Return the environment variable named 'var'.
If there is no such environment variable set, return the default.
If the default is None, raise an exception for an undefined variable.
This is modified directly from: https://github.com/fishtown-analytics/dbt/blob/cee0bfbfa2596520032b766fd1027fe748777c75/core/dbt/context/base.py#L275
"""
if var in os.environ:
return os.environ[var]
elif default is not None:
return default
else:
raise ValueError(f"Env var required but not provided: '{var}'")
@classmethod
def _template_string(cls, raw_string: str) -> str:
"""Render a jinja templated string.
Reference for macros: https://github.com/fishtown-analytics/dbt/blob/cee0bfbfa2596520032b766fd1027fe748777c75/core/dbt/context/base.py#L275
"""
jinja_context: dict = {"env_var": cls.env_var}
jinja_env = SandboxedEnvironment(
# The do extension allows the "do" directive
autoescape=False,
extensions=["jinja2.ext.do"],
)
template = jinja_env.from_string(raw_string, globals=jinja_context)
return template.render()
@classmethod
def from_string(cls, raw_string, **kwargs):
"""Load a object from a string.
This applies jinja templating.
"""
if cls.templated:
rendered_string = cls._template_string(raw_string)
else:
rendered_string = raw_string
config_dict = yaml.safe_load(rendered_string)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def from_file(cls, fname, **kwargs):
"""Load a object from a file."""
with open(fname) as raw_file:
raw_string = raw_file.read()
return cls.from_string(raw_string, **kwargs)
@classmethod
def from_path(cls, path, fname=None, **kwargs):
"""Load a file from a path."""
# Expand user if relevant.
path = os.path.expanduser(path)
return cls.from_file(
fname=os.path.join(path, fname or cls.default_file_name), **kwargs
)
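# A minimal usage sketch (hedged): "ExampleConfig" and the YAML string below are
# illustrative assumptions, showing how from_dict is meant to be overridden and
# how the env_var() jinja macro is used by templated strings.
class ExampleConfig(YamlFileObject):
    """Toy config object built from a yaml mapping."""
    default_file_name = "config.yaml"
    @classmethod
    def from_dict(cls, config, **kwargs):
        obj = cls()
        obj.settings = config
        return obj
if __name__ == "__main__":
    os.environ.setdefault("APP_NAME", "demo")
    cfg = ExampleConfig.from_string("name: {{ env_var('APP_NAME', 'fallback') }}")
    print(cfg.settings)  # {'name': 'demo'}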
|
the-stack_106_21433
|
#!/usr/bin/env python
# coding: utf-8
#loading modules/libraries
import numpy
import matplotlib.pyplot
import decimal
#loading .csv from GIS (preview only; the result is not stored and the file is re-loaded below)
numpy.loadtxt(fname= "simplecoast.csv", delimiter = ",")
#importing the csv that I created from a GIS shapefile trace & transforming into a re-useable variable
coastline = (numpy.loadtxt(fname = "simplecoast.csv",delimiter = ","))
#examining the data
print(coastline)
print (type(coastline))
print(coastline.dtype)
print(coastline.shape)
# need to separate the data by x and y so that I can plot it.
# x-data
x = coastline[:,0]
#y-data
y = coastline[:,1]
#print(x)
#print(y)
matplotlib.pyplot.plot(x,y)
# the simple coastline is created for the MD,VA and NC coastal borders.
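#a possible follow-up (hedged sketch; the output file name is an assumption):
#matplotlib.pyplot.savefig("simplecoast.png")
#matplotlib.pyplot.show()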
|
the-stack_106_21434
|
import argparse
import os
import sys
from sem_seg.model import *
import sem_seg.indoor3d_util as indoor3d_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--model_path', required=True, help='model checkpoint file path')
parser.add_argument('--dump_dir', required=True, help='dump folder path')
parser.add_argument('--output_filelist', required=True, help='TXT filename, filelist, each line is an output for a room')
parser.add_argument('--room_data_filelist', required=True, help='TXT filename, filelist, each line is a test room data label file.')
parser.add_argument('--no_clutter', action='store_true', help='If true, do not count the clutter class')
parser.add_argument('--visu', action='store_true', help='Whether to output OBJ file for prediction visualization.')
FLAGS = parser.parse_args()
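# A hedged example invocation (the script name, paths and file names below are
# illustrative assumptions, not taken from this repository):
#   python eval.py --model_path log/model.ckpt --dump_dir dump \
#       --output_filelist dump/output_filelist.txt \
#       --room_data_filelist meta/area6_data_label.txt --visu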
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(FLAGS.room_data_filelist)]
NUM_CLASSES = 13
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate():
is_training = False
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# simple model
pred = get_model(pointclouds_pl, is_training_pl)
loss = get_loss(pred, labels_pl)
pred_softmax = tf.nn.softmax(pred)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = True
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'pred_softmax': pred_softmax,
'loss': loss}
total_correct = 0
total_seen = 0
fout_out_filelist = open(FLAGS.output_filelist, 'w')
for room_path in ROOM_PATH_LIST:
out_data_label_filename = os.path.basename(room_path)[:-4] + '_pred.txt'
out_data_label_filename = os.path.join(DUMP_DIR, out_data_label_filename)
out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt'
out_gt_label_filename = os.path.join(DUMP_DIR, out_gt_label_filename)
print(room_path, out_data_label_filename)
a, b = eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename)
total_correct += a
total_seen += b
fout_out_filelist.write(out_data_label_filename+'\n')
fout_out_filelist.close()
log_string('all room eval accuracy: %f'% (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename):
error_cnt = 0
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
if FLAGS.visu:
fout = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_pred.obj'), 'w')
fout_gt = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_gt.obj'), 'w')
fout_data_label = open(out_data_label_filename, 'w')
fout_gt_label = open(out_gt_label_filename, 'w')
current_data, current_label = indoor3d_util.room2blocks_wrapper_normalized(room_path, NUM_POINT)
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
# Get room dimension..
data_label = np.load(room_path)
data = data_label[:,0:6]
max_room_x = max(data[:,0])
max_room_y = max(data[:,1])
max_room_z = max(data[:,2])
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
print(file_size)
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
cur_batch_size = end_idx - start_idx
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
loss_val, pred_val = sess.run([ops['loss'], ops['pred_softmax']],
feed_dict=feed_dict)
if FLAGS.no_clutter:
pred_label = np.argmax(pred_val[:,:,0:12], 2) # BxN
else:
pred_label = np.argmax(pred_val, 2) # BxN
# Save prediction labels to OBJ file
for b in range(BATCH_SIZE):
pts = current_data[start_idx+b, :, :]
l = current_label[start_idx+b,:]
pts[:,6] *= max_room_x
pts[:,7] *= max_room_y
pts[:,8] *= max_room_z
pts[:,3:6] *= 255.0
pred = pred_label[b, :]
for i in range(NUM_POINT):
color = indoor3d_util.g_label2color[pred[i]]
color_gt = indoor3d_util.g_label2color[current_label[start_idx+b, i]]
if FLAGS.visu:
fout.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color[0], color[1], color[2]))
fout_gt.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color_gt[0], color_gt[1], color_gt[2]))
fout_data_label.write('%f %f %f %d %d %d %f %d\n' % (pts[i,6], pts[i,7], pts[i,8], pts[i,3], pts[i,4], pts[i,5], pred_val[b,i,pred[i]], pred[i]))
fout_gt_label.write('%d\n' % (l[i]))
correct = np.sum(pred_label == current_label[start_idx:end_idx,:])
total_correct += correct
total_seen += (cur_batch_size*NUM_POINT)
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
for j in range(NUM_POINT):
l = current_label[i, j]
total_seen_class[l] += 1
total_correct_class[l] += (pred_label[i-start_idx, j] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
fout_data_label.close()
fout_gt_label.close()
if FLAGS.visu:
fout.close()
fout_gt.close()
return total_correct, total_seen
if __name__=='__main__':
with tf.Graph().as_default():
evaluate()
LOG_FOUT.close()
|
the-stack_106_21437
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def logout_view(request):
"""Log the user out"""
logout(request)
return HttpResponseRedirect(reverse('address_books:index'))
def register(request):
"""Register new user."""
if request.method != 'POST':
# Display blank registration forms
form = UserCreationForm()
else:
# Process completed form
form = UserCreationForm(data=request.POST)
if form.is_valid():
new_user = form.save()
# Log the user in and redirect to home page
authenticated_user = authenticate(username=new_user.username,
password=request.POST['password1'])
login(request, authenticated_user)
return HttpResponseRedirect(reverse('address_books:index'))
context = {'form': form}
return render(request, 'users/register.html', context)
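# A hedged wiring sketch (URL patterns and names are assumptions, not from this
# file); these views are typically exposed via the app's urls.py:
#   urlpatterns = [
#       url(r'^logout/$', views.logout_view, name='logout'),
#       url(r'^register/$', views.register, name='register'),
#   ]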
|
the-stack_106_21438
|
"""Imports unittest as a replacement for testing.pybase.googletest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import itertools
import os
import sys
import tempfile
# pylint: disable=wildcard-import
from unittest import *
unittest_main = main
# pylint: disable=invalid-name
# pylint: disable=undefined-variable
def main(*args, **kwargs):
"""Delegate to unittest.main after redefining testLoader."""
if 'TEST_SHARD_STATUS_FILE' in os.environ:
try:
f = None
try:
f = open(os.environ['TEST_SHARD_STATUS_FILE'], 'w')
f.write('')
except IOError:
sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.'
% os.environ['TEST_SHARD_STATUS_FILE'])
sys.exit(1)
finally:
if f is not None: f.close()
if ('TEST_TOTAL_SHARDS' not in os.environ or
'TEST_SHARD_INDEX' not in os.environ):
return unittest_main(*args, **kwargs)
total_shards = int(os.environ['TEST_TOTAL_SHARDS'])
shard_index = int(os.environ['TEST_SHARD_INDEX'])
base_loader = TestLoader()
delegate_get_names = base_loader.getTestCaseNames
bucket_iterator = itertools.cycle(range(total_shards))
def getShardedTestCaseNames(testCaseClass):
filtered_names = []
for testcase in sorted(delegate_get_names(testCaseClass)):
bucket = next(bucket_iterator)
if bucket == shard_index:
filtered_names.append(testcase)
return filtered_names
# Override getTestCaseNames
base_loader.getTestCaseNames = getShardedTestCaseNames
kwargs['testLoader'] = base_loader
unittest_main(*args, **kwargs)
def GetTempDir():
first_frame = inspect.stack()[-1][0]
temp_dir = os.path.join(
tempfile.gettempdir(), os.path.basename(inspect.getfile(first_frame)))
  if temp_dir.endswith('.py'):
    temp_dir = temp_dir[:-len('.py')]
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir, 0o755)
return temp_dir
def StatefulSessionAvailable():
return False
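# A hedged illustration (not part of the shim): how the round-robin sharding in
# main() assigns sorted test case names to shards; the test names are made up.
def _example_shard_assignment(total_shards=2, shard_index=0):
  names = sorted(['testA', 'testB', 'testC', 'testD', 'testE'])
  buckets = itertools.cycle(range(total_shards))
  # With 2 shards, shard 0 receives ['testA', 'testC', 'testE'].
  return [name for name in names if next(buckets) == shard_index]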
|
the-stack_106_21439
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from .debug_options import DebugOptions, LogLevel
from . import (_utils,
_io,
_logger,
_onnx_models,
_are_deterministic_algorithms_enabled)
from .torch_cpp_extensions.cpu.aten_op_executor import load_aten_op_executor_cpp_extension_if_needed
from ._custom_autograd_function import custom_autograd_function_enabler
from ._custom_autograd_function_exporter import _post_process_after_export
from ._graph_execution_interface import GraphExecutionInterface
from ._fallback import (_FallbackManager,
ORTModuleDeviceException,
ORTModuleONNXModelException,
ORTModuleTorchModelException,
wrap_exception)
from ._gradient_accumulation_manager import GradientAccumulationManager
from onnxruntime.training.ortmodule import ONNX_OPSET_VERSION
from onnxruntime.capi import _pybind_state as C
from onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference
from abc import ABC, abstractmethod
import copy
from functools import reduce
import io
import inspect
import os
import onnx
import onnxruntime
import torch
import warnings
from enum import IntFlag
from torch.utils.cpp_extension import ROCM_HOME
class _RunStateInfo(object):
def __init__(self, state, output_info):
"""
:param state: State of partial run that contains intermediate tensors needed to resume the run later.
:param output_info: Output info.
"""
self.state = state
self.output_info = output_info
class _SkipCheck(IntFlag):
"""Enumeration to specify which checks should be skipped, allowing faster execution"""
SKIP_CHECK_DISABLED = 1
SKIP_CHECK_DEVICE = 2
SKIP_CHECK_BUILD_GRADIENT = 4
SKIP_CHECK_EXECUTION_AGENT = 8
def is_set(self, check):
"""Check whether `check` is set on the `_SkipCheck instance
SKIP_CHECK_DISABLED implies the check will return False
"""
return not _SkipCheck.is_disabled(self) and check in self
def is_disabled(self):
"""Check whether `_SkipCheck.SKIP_CHECK_DISABLED is set on the `_SkipCheck instance"""
return _SkipCheck.SKIP_CHECK_DISABLED in self
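# A hedged illustration (not part of ORTModule): how the _SkipCheck flags above
# compose, and how SKIP_CHECK_DISABLED overrides every other check.
def _skip_check_example():
    checks = _SkipCheck.SKIP_CHECK_DEVICE | _SkipCheck.SKIP_CHECK_BUILD_GRADIENT
    assert checks.is_set(_SkipCheck.SKIP_CHECK_DEVICE)
    assert not checks.is_set(_SkipCheck.SKIP_CHECK_EXECUTION_AGENT)
    # Once SKIP_CHECK_DISABLED is set, is_set() always returns False.
    assert not (checks | _SkipCheck.SKIP_CHECK_DISABLED).is_set(_SkipCheck.SKIP_CHECK_DEVICE)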
class GraphExecutionManager(GraphExecutionInterface):
def __init__(self, module, debug_options: DebugOptions, fallback_manager: _FallbackManager):
"""Manages construction and execution of ONNX graphs"""
super(GraphExecutionManager, self).__init__(module._original_module)
        # IMPORTANT: Debug and Fallback must be configured first
self._debug_options = debug_options
self._fallback_manager = fallback_manager
        # Original and flattened (transformed) output module
self._flattened_module = module
# onnx models
self._onnx_models = _onnx_models.ONNXModels()
# Model after inference optimization or gradient building.
self._optimized_onnx_model = None
self._graph_builder = None
self._graph_info = None
self._graph_initializer_names = None
self._graph_initializer_names_to_train = None
self._graph_initializers = None
# TrainingAgent or InferenceAgent
self._execution_agent = None
        # Indicators that some logic has been executed previously and thus can be skipped for faster training
self._skip_check = reduce(lambda x, y: x | y,
[_SkipCheck[name] for name in
_utils.parse_os_env_skip_check_flags('ORTMODULE_SKIPCHECK_POLICY',
_SkipCheck.SKIP_CHECK_DISABLED.name)])
self._first_skip_check_warning = True
# Graph transformer config
# Specify cast propagation strategy. Currently three strategies are available, NONE, INSERT-AND-REDUCE and FLOOD-FILL
# The default is NONE, which implies the transformer does no cast-propagation transformation.
self._propagate_cast_ops_strategy = C.PropagateCastOpsStrategy.NONE
# Optimize by moving Cast operations if propagate_cast_ops_level is non-negative.
# - If the _propagate_cast_ops_level is set to zero, then the transformation considers only the opcodes specified by _propagate_cast_ops_allow
# as "FP16 safe", in order to insert/(re)move cast operations before/after to perform such operations in reduced (16-bit) precision.
        # - If propagate_cast_ops_level is positive (1 or 2), then in addition to the opcodes specified by propagate_cast_ops_allow, use onnxruntime's
        #   predetermined list of opcodes considered safe to move before/after a cast operation.
        # - Onnxruntime Level 1 predetermined "FP16 safe" opcodes include only opcodes that do not perform any computation, such as Transpose, Split, Reshape, etc.,
        #   whereas Level 2 predetermined "FP16 safe" opcodes include opcodes that perform computation using contrib ops: GeLU, Dropout, LayerNormalization, etc.
self._propagate_cast_ops_level = 1
# List of opcodes to be considered safe to move before/after cast operation if propagate_cast_ops_level is zero.
self._propagate_cast_ops_allow = []
# Whether allow fusion of layer norm subgraph if doing so will cause modified precision.
self._allow_layer_norm_mod_precision = False
# Value can be either torch.onnx.TrainingMode.TRAINING or torch.onnx.TrainingMode.EVAL
# To be instantiated in the concrete implementation of GraphExecutionManager
self._export_mode = None
# Exporter can take extra arguments for ORTModule extensions
# It cannot overlap with required/immutable arguments (validated in runtime)
self._export_extra_kwargs = {}
# Related to training graph shape inference
self._current_input_shape = None
# default execution order is priority-based for both dynamic/static shape input for now
# if we observe benefit of static shape, we can expose this flag to user
self._use_static_shape = False
# flag to enable symbolic shape inference for dynamic shape inputs to improve performance
self._run_symbolic_shape_infer = True
# PyTorch custom Autograd function support
self._enable_custom_autograd_function = custom_autograd_function_enabler.state
self._input_info = None
self._module_output_schema = None
self._device = _utils.get_device_from_module(module)
self._module_parameters = inspect.signature(
self._original_module.forward).parameters.values()
# TODO: remove after PyTorch ONNX exporter supports VAR_KEYWORD parameters.
for input_parameter in self._module_parameters:
if input_parameter.kind == inspect.Parameter.VAR_KEYWORD:
if self._debug_options.logging.log_level <= LogLevel.WARNING:
warnings.warn("The model's forward method has **kwargs parameter which has EXPERIMENTAL support!",
UserWarning)
self.is_rocm_pytorch = (True if (
(torch.version.hip is not None) and (ROCM_HOME is not None)) else False)
self._use_external_gpu_allocator = True
# assign self._torch_alloc and self._torch_free if self._use_external_gpu_allocator is True
self._get_torch_gpu_allocator_function_addresses()
# WIP feature to enable caching in Gradient accumulation scenario.
self._enable_grad_acc_optimization = False
self._gradient_accumulation_manager = GradientAccumulationManager()
# Memory aware gradient builder.
self._use_memory_efficient_gradient = False
# Flag to re-export the model due to attribute change on original module.
# Re-export will be avoided if _skip_check is enabled.
self._original_model_has_changed = False
def _get_torch_gpu_allocator_function_addresses(self):
if self._use_external_gpu_allocator and torch.cuda.is_available():
# CPP extension to get torch GPU allocator's alloc and free function addresses
from onnxruntime.training.ortmodule.torch_cpp_extensions import torch_gpu_allocator
self._torch_alloc = torch_gpu_allocator.gpu_caching_allocator_raw_alloc_address()
self._torch_free = torch_gpu_allocator.gpu_caching_allocator_raw_delete_address()
self._torch_empty_cache = torch_gpu_allocator.gpu_caching_allocator_empty_cache_address()
def _validate_module_type(self, module):
"""Raises ORTModuleTorchModelException if the module is not a torch.nn.Module"""
if not isinstance(module, torch.nn.Module):
raise wrap_exception(ORTModuleTorchModelException,
TypeError(f"ORTModule only supports torch.nn.Module as input. {type(module)} is not supported."))
# Hard-coded list of unsupported torch.nn.Module goes here for fallback
if isinstance(module, torch.nn.DataParallel):
raise wrap_exception(ORTModuleTorchModelException,
TypeError("ORTModule is not compatible with torch.nn.DataParallel. "
"Please use torch.nn.parallel.DistributedDataParallel instead."))
@staticmethod
def execution_session_run_forward(execution_session, onnx_model, device, *inputs):
"""Runs the forward pass on `execution_session` with given `onnx_model`, `device` and `inputs`
This is a helper that can be called by the actual `GraphExecutionManager.forward` method
Args:
            execution_session (InferenceAgent or TrainingAgent): Agent which runs either inference or training
onnx_model (onnx.ModelProto): ONNX model
device (torch.device): PyTorch device
inputs: (torch.Tensor or a container of): User input
Returns:
Returns a tuple (user_outputs, run_info):
user_outputs: The model output (either torch.Tensor or a container of torch.Tensor)
run_info: A _RunStateInfo which contains extra information about the execution of the graph
"""
        raise NotImplementedError
@abstractmethod
def forward(self):
"""Executes the forward method for ORTModule
This is an abstract method and must be overridden by a concrete implementation.
This is the only method that the user should call on a concrete instance of the ExecutionManager
All other methods are internal"""
pass
def _build_graph(self):
if self._use_static_shape:
self._graph_builder.build(self._input_info.shape)
else:
self._graph_builder.build()
self._onnx_models.optimized_model = onnx.load_model_from_string(
self._graph_builder.get_model())
self._onnx_models.optimized_pre_grad_model = onnx.load_model_from_string(
self._graph_builder.get_inference_optimized_model())
self._graph_info = self._graph_builder.get_graph_info()
def _get_session_config(self):
"""Creates and returns the session configuration to be used for the ExecutionAgent"""
if _are_deterministic_algorithms_enabled():
if self._debug_options.logging.log_level <= _logger.LogLevel.INFO:
warnings.warn("ORTModule's determinism will be enabled because PyTorch's determinism is enabled.",
UserWarning)
providers = None
provider_options = None
if self._device.type == 'cuda':
# Configure the InferenceSessions to use the specific GPU on which the model is placed.
providers = (["ROCMExecutionProvider"] if self.is_rocm_pytorch else [
"CUDAExecutionProvider"])
providers.append("CPUExecutionProvider")
provider_option_map = {"device_id": str(self._device.index)}
if not self.is_rocm_pytorch:
# Set Conv algo search mode to HEURISTIC, which is same as PyTorch's default setting.
provider_option_map["cudnn_conv_algo_search"] = "HEURISTIC"
provider_option_map["cudnn_conv_use_max_workspace"] = "1"
if self._use_external_gpu_allocator:
provider_option_map["gpu_external_alloc"] = str(self._torch_alloc)
provider_option_map["gpu_external_free"] = str(self._torch_free)
provider_option_map["gpu_external_empty_cache"] = str(self._torch_empty_cache)
provider_options = [provider_option_map, {}]
elif self._device.type == 'cpu':
providers = ["CPUExecutionProvider"]
provider_options = [{}]
session_options = onnxruntime.SessionOptions()
session_options.enable_mem_pattern = False
session_options.enable_mem_reuse = False
session_options.use_deterministic_compute = _are_deterministic_algorithms_enabled()
# default to PRIORITY_BASED execution order
session_options.execution_order = onnxruntime.ExecutionOrder.PRIORITY_BASED
# 0:Verbose, 1:Info, 2:Warning. 3:Error, 4:Fatal. Default is 2.
session_options.log_severity_level = int(
self._debug_options.logging.log_level)
if self._debug_options.save_onnx_models.save:
session_options.optimized_model_filepath = \
os.path.join(self._debug_options.save_onnx_models.path,
_onnx_models._get_onnx_file_name(
self._debug_options.save_onnx_models.name_prefix,
'execution_model', self._export_mode))
return session_options, providers, provider_options
def _export_model(self, *inputs, **kwargs):
# 1. Set the self._device from the user module
# 2. Verify input schema matches schema used on previous model export
# 3. Export the user model under self._export_training_flag mode
# Return True if the model needed to be exported, False if no export was required.
# Note: Model is only exported when:
# 1. Model has never been exported before.
        #   2. Model input schema has changed (changes in inputs requiring gradient, shape changes, boolean input value changes, etc.)
# Model is not re-exported when the model parameters change. This can happen when the model is a stateful model,
# or the user explicitly changed model parameters after the onnx export.
schema = _io._extract_schema(
{'args': copy.copy(inputs), 'kwargs': copy.copy(kwargs)})
if self._onnx_models.exported_model and schema == self._input_info.schema and not self._original_model_has_changed:
# All required models have already been exported previously
return False
self._set_device_from_module(inputs, kwargs)
self._onnx_models.exported_model = self._get_exported_model(
schema, *inputs, **kwargs)
load_aten_op_executor_cpp_extension_if_needed(self._onnx_models.exported_model)
if self._debug_options.save_onnx_models.save:
self._onnx_models.save_exported_model(self._debug_options.save_onnx_models.path,
self._debug_options.save_onnx_models.name_prefix,
self._export_mode)
if self._run_symbolic_shape_infer:
self._onnx_models.exported_model = SymbolicShapeInference.infer_shapes(self._onnx_models.exported_model,
auto_merge=True, guess_output_rank=True)
return True
def _get_exported_model(self, input_schema, *inputs, **kwargs):
'''Exports PyTorch `self._flattened_module` to ONNX for inferencing or training, using `*inputs` and `**kwargs` as input
TODO: How to support dynamic axes? Dimensions are determined by samples
'''
# Setup dynamic axes for onnx model
self._input_info = _io.parse_inputs_for_onnx_export(self._module_parameters,
None,
input_schema,
inputs,
kwargs)
output_names, output_dynamic_axes, self._module_output_schema = \
_io.parse_outputs_for_onnx_export_and_extract_schema(
self._original_module, inputs, kwargs)
self._input_info.dynamic_axes.update(output_dynamic_axes)
# FlattenedModule needs _InputInfo to expand user input from *args to *args + **kwargs
self._flattened_module._input_info = self._input_info
# Export torch.nn.Module to ONNX
f = io.BytesIO()
# Deepcopy inputs, since input values may change after model run.
# NOTE: Inputs may contain tensors that have attributes preventing their deepcopy (example grad_fn).
# Therefore, deepcopy only the data component of the input tensors for export.
sample_inputs_copy, sample_kwargs_copy = _io.deepcopy_model_input(
*inputs, **kwargs)
# NOTE: Flattening the input will change the 'input schema', resulting in a re-export
sample_inputs_as_tuple = tuple(self._input_info.flatten(
sample_inputs_copy, sample_kwargs_copy, self._device))
# Ops behaving differently under train/eval mode need to exported with the
# correct training flag to reflect the expected behavior.
# For example, the Dropout node in a model is dropped under eval mode.
assert self._export_mode is not None, "Please use a concrete instance of ExecutionManager"
try:
with torch.set_grad_enabled(self._enable_custom_autograd_function), \
_logger.suppress_os_stream_output(log_level=self._debug_options.logging.log_level):
required_export_kwargs = {'input_names': self._input_info.names,
'output_names': output_names,
'opset_version': ONNX_OPSET_VERSION,
'do_constant_folding': False,
'training': self._export_mode,
'dynamic_axes': self._input_info.dynamic_axes,
'verbose': self._debug_options.logging.log_level < LogLevel.WARNING,
'export_params': False,
'keep_initializers_as_inputs': True}
invalid_args = self._export_extra_kwargs.keys() & required_export_kwargs.keys()
assert len(invalid_args) == 0,\
f"The following PyTorch exporter arguments cannot be specified: '{invalid_args}'."
torch.onnx.export(self._flattened_module,
sample_inputs_as_tuple,
f,
**required_export_kwargs,
**self._export_extra_kwargs)
except Exception as e:
raise wrap_exception(ORTModuleONNXModelException,
RuntimeError(f'There was an error while exporting the PyTorch model to ONNX: '
f'\n\n{_utils.get_exception_as_string(e)}'))
exported_model = onnx.load_model_from_string(f.getvalue())
exported_model = _post_process_after_export(exported_model,
self._enable_custom_autograd_function,
self._debug_options.logging.log_level)
return exported_model
def _set_device_from_module(self, inputs, kwargs):
"""Get the device from the module and save it to self._device"""
device = _utils.get_device_from_module(self._original_module) or \
_utils.get_device_from_inputs(inputs, kwargs)
if not self._device or self._device != device:
self._device = device
if not self._device:
raise wrap_exception(ORTModuleDeviceException,
RuntimeError('A device must be specified in the model or inputs!'))
def _get_graph_transformer_config(self):
graph_transformer_config = C.TrainingGraphTransformerConfiguration()
graph_transformer_config.propagate_cast_ops_config = C.PropagateCastOpsConfiguration()
graph_transformer_config.propagate_cast_ops_config.level = self._propagate_cast_ops_level
graph_transformer_config.propagate_cast_ops_config.allow = self._propagate_cast_ops_allow
graph_transformer_config.propagate_cast_ops_config.strategy = self._propagate_cast_ops_strategy
graph_transformer_config.allow_layer_norm_mod_precision = self._allow_layer_norm_mod_precision
return graph_transformer_config
def _initialize_graph_builder(self, training):
"""Creates a new OrtModuleGraphBuilder, initializes it and saves it to self._graph_builder"""
# All initializer names along with user inputs are a part of the onnx graph inputs
# since the onnx model was exported with the flag keep_initializers_as_inputs=True
onnx_initializer_names = {
p.name for p in self._onnx_models.exported_model.graph.input}
# TODO: PyTorch exporter bug: changes the initializer order in ONNX model
initializer_names = [name for name, _ in self._flattened_module.named_parameters()
if name in onnx_initializer_names]
initializer_names_to_train = [name for name, param in self._flattened_module.named_parameters()
if param.requires_grad and name in onnx_initializer_names]
# Build and optimize the full graph
grad_builder_config = C.OrtModuleGraphBuilderConfiguration()
grad_builder_config.initializer_names = initializer_names
grad_builder_config.initializer_names_to_train = initializer_names_to_train
grad_builder_config.input_names_require_grad = self._input_info.require_grad_names
grad_builder_config.build_gradient_graph = training
grad_builder_config.graph_transformer_config = self._get_graph_transformer_config()
grad_builder_config.enable_caching = self._enable_grad_acc_optimization
grad_builder_config.loglevel = _logger.ortmodule_loglevel_to_onnxruntime_c_loglevel(self._debug_options.logging.log_level)
grad_builder_config.use_memory_efficient_gradient = self._use_memory_efficient_gradient
self._graph_builder = C.OrtModuleGraphBuilder()
# It is assumed here that the order and names of the inputs and outputs are not modified by the backend in any way
# and are kept as they appear in the exported onnx model.
self._graph_builder.initialize(
self._onnx_models.exported_model.SerializeToString(), grad_builder_config)
# TODO: Explore ways to make self._graph_info.initializer_names and self._graph_info.initializer_names_to_train
# a set (unordered_set in the backend) that does not require a copy on each reference.
self._graph_initializer_names = set(initializer_names)
self._graph_initializer_names_to_train = set(
initializer_names_to_train)
# Initializers can be cached and used since they are expected not to be re-instantiated
# between forward calls.
self._graph_initializers = [param for name, param in self._flattened_module.named_parameters()
if name in self._graph_initializer_names]
def signal_model_changed(self):
"""Signals the execution manager to re-export the model on the next forward call"""
self._original_model_has_changed = True
|
the-stack_106_21441
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions.
e.g. `a and b -> tf.logical_and(a, b)`. This is not done automatically in TF.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops, this should be done after the loop has been converted to
# tf.while_loop, so that the expanded conditionals are properly scoped.
# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'
class LogicalExpressionTransformer(converter.Base):
"""Converts logical expressions to corresponding TF calls."""
def __init__(self, ctx):
super(LogicalExpressionTransformer, self).__init__(ctx)
# TODO(mdan): Look into replacing with bitwise operators instead.
# TODO(mdan): Skip replacing if the function is trivial.
self.op_mapping = {
gast.And: 'tf.logical_and',
gast.Eq: 'tf.equal',
gast.Gt: 'tf.greater',
gast.GtE: 'tf.greater_equal',
gast.Lt: 'tf.less',
gast.LtE: 'tf.less_equal',
gast.Not: 'tf.logical_not',
gast.NotEq: 'tf.not_equal',
gast.Or: 'tf.logical_or',
gast.USub: 'tf.negative',
}
def _expect_simple_symbol(self, operand):
if isinstance(operand, gast.Name):
return
if anno.hasanno(operand, SAFE_BOOLEAN_OPERAND):
return
raise NotImplementedError(
'only simple local variables are supported in logical and compound '
'comparison expressions; for example, we support "a or b" but not '
'"a.x or b"; for a workaround, assign the expression to a local '
'variable and use that instead, for example "tmp = a.x", "tmp or b"')
def _has_matching_func(self, operator):
op_type = type(operator)
return op_type in self.op_mapping
def _matching_func(self, operator):
op_type = type(operator)
return self.op_mapping[op_type]
def _as_function(self, func_name, args):
template = """
func_name(args)
"""
replacement = templates.replace_as_expression(
template, func_name=parser.parse_expression(func_name), args=args)
anno.setanno(replacement, SAFE_BOOLEAN_OPERAND, True)
return replacement
def visit_Compare(self, node):
node = self.generic_visit(node)
if not all(self._has_matching_func(op) for op in node.ops):
if len(node.ops) == 1:
# Basic expressions are safe to leave as they are.
return node
else:
raise NotImplementedError(
'compound expression with at least one unsupported '
'operator: {}'.format(node.ops))
ops_and_comps = list(zip(node.ops, node.comparators))
left = node.left
op_tree = None
# Repeated comparisons are converted to conjunctions:
# a < b < c -> a < b and b < c
while ops_and_comps:
op, right = ops_and_comps.pop(0)
binary_comparison = self._as_function(
self._matching_func(op), (left, right))
if isinstance(left, gast.Name) and isinstance(right, gast.Name):
anno.setanno(binary_comparison, SAFE_BOOLEAN_OPERAND, True)
if op_tree:
self._expect_simple_symbol(right)
op_tree = self._as_function('tf.logical_and',
(binary_comparison, op_tree))
else:
op_tree = binary_comparison
left = right
assert op_tree is not None
return op_tree
def visit_UnaryOp(self, node):
node = self.generic_visit(node)
return self._as_function(self._matching_func(node.op), node.operand)
def visit_BoolOp(self, node):
node = self.generic_visit(node)
node_values = node.values
right = node.values.pop()
self._expect_simple_symbol(right)
while node_values:
left = node_values.pop()
self._expect_simple_symbol(left)
right = self._as_function(self._matching_func(node.op), (left, right))
return right
def transform(node, ctx):
return LogicalExpressionTransformer(ctx).visit(node)
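# A hedged illustration of the intended rewrites (shown only as comments; the
# variable names are placeholders):
#   a and b    -> tf.logical_and(a, b)
#   not a      -> tf.logical_not(a)
#   a < b < c  -> tf.logical_and(tf.less(b, c), tf.less(a, b))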
|
the-stack_106_21444
|
import hashlib
import logging
import os
import shutil
from django.apps import apps
from django.db import models, transaction
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.classes import ModelQueryFields
from mayan.apps.common.signals import signal_mayan_pre_save
from mayan.apps.converter.classes import ConverterBase
from mayan.apps.converter.exceptions import InvalidOfficeFormat, PageCountError
from mayan.apps.converter.layers import layer_saved_transformations
from mayan.apps.converter.transformations import TransformationRotate
from mayan.apps.mimetype.api import get_mimetype
from mayan.apps.storage.classes import DefinedStorageLazy
from mayan.apps.templating.classes import Template
from ..events import event_document_version_new, event_document_version_revert
from ..literals import (
STORAGE_NAME_DOCUMENT_IMAGE, STORAGE_NAME_DOCUMENT_VERSION
)
from ..managers import DocumentVersionManager, ValidDocumentVersionManager
from ..settings import setting_fix_orientation, setting_hash_block_size
from ..signals import signal_post_document_created, signal_post_version_upload
from .document_models import Document
__all__ = ('DocumentVersion',)
logger = logging.getLogger(name=__name__)
# document image cache name hash function
def hash_function():
return hashlib.sha256()
def upload_to(instance, filename):
return instance.document.document_type.get_upload_filename(
instance=instance, filename=filename
)
class DocumentVersion(models.Model):
"""
Model that describes a document version and its properties
Fields:
* mimetype - File mimetype. MIME types are a standard way to describe the
format of a file, in this case the file format of the document.
Some examples: "text/plain" or "image/jpeg". Mayan uses this to determine
how to render a document's file. More information:
http://www.freeformatter.com/mime-types-list.html
* encoding - File Encoding. The filesystem encoding of the document's
file: binary 7-bit, binary 8-bit, text, base64, etc.
* checksum - A hash/checkdigit/fingerprint generated from the document's
binary data. Only identical documents will have the same checksum. If a
document is modified after upload it's checksum will not match, used for
detecting file tampering among other things.
"""
_hooks_pre_create = []
_pre_open_hooks = []
_pre_save_hooks = []
_post_save_hooks = []
document = models.ForeignKey(
on_delete=models.CASCADE, related_name='versions', to=Document,
verbose_name=_('Document')
)
timestamp = models.DateTimeField(
auto_now_add=True, db_index=True, help_text=_(
'The server date and time when the document version was processed.'
), verbose_name=_('Timestamp')
)
comment = models.TextField(
blank=True, default='', help_text=_(
'An optional short text describing the document version.'
), verbose_name=_('Comment')
)
# File related fields
file = models.FileField(
storage=DefinedStorageLazy(name=STORAGE_NAME_DOCUMENT_VERSION),
upload_to=upload_to, verbose_name=_('File')
)
mimetype = models.CharField(
blank=True, editable=False, help_text=_(
'The document version\'s file mimetype. MIME types are a '
'standard way to describe the format of a file, in this case '
'the file format of the document. Some examples: "text/plain" '
'or "image/jpeg". '
), max_length=255, null=True, verbose_name=_('MIME type')
)
encoding = models.CharField(
blank=True, editable=False, help_text=_(
'The document version file encoding. binary 7-bit, binary 8-bit, '
'text, base64, etc.'
), max_length=64, null=True, verbose_name=_('Encoding')
)
checksum = models.CharField(
blank=True, db_index=True, editable=False, help_text=(
'A hash/checkdigit/fingerprint generated from the document\'s '
'binary data. Only identical documents will have the same '
'checksum.'
), max_length=64, null=True, verbose_name=_('Checksum')
)
class Meta:
ordering = ('timestamp',)
verbose_name = _('Document version')
        verbose_name_plural = _('Document versions')
objects = DocumentVersionManager()
valid = ValidDocumentVersionManager()
@classmethod
def _execute_hooks(cls, hook_list, instance, **kwargs):
result = None
for hook in hook_list:
result = hook(document_version=instance, **kwargs)
if result:
kwargs.update(result)
return result
@classmethod
def _insert_hook_entry(cls, hook_list, func, order=None):
order = order or len(hook_list)
hook_list.insert(order, func)
@classmethod
def execute_pre_create_hooks(cls, kwargs=None):
"""
Helper method to allow checking if it is possible to create
a new document version.
"""
cls._execute_hooks(
hook_list=cls._hooks_pre_create, instance=None, kwargs=kwargs
)
@classmethod
def register_post_save_hook(cls, func, order=None):
cls._insert_hook_entry(
hook_list=cls._post_save_hooks, func=func, order=order
)
@classmethod
def register_pre_create_hook(cls, func, order=None):
cls._insert_hook_entry(
hook_list=cls._hooks_pre_create, func=func, order=order
)
@classmethod
def register_pre_open_hook(cls, func, order=None):
cls._insert_hook_entry(
hook_list=cls._pre_open_hooks, func=func, order=order
)
@classmethod
def register_pre_save_hook(cls, func, order=None):
cls._insert_hook_entry(
hook_list=cls._pre_save_hooks, func=func, order=order
)
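    # A hedged usage sketch (the hook below is an assumption, not part of this
    # model): hooks receive the instance via the document_version keyword used
    # in _execute_hooks().
    #   def _log_version(document_version, **kwargs):
    #       logger.debug('Saving a version of %s', document_version.document)
    #   DocumentVersion.register_pre_save_hook(func=_log_version)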
def __str__(self):
return self.get_rendered_string()
@cached_property
def cache(self):
Cache = apps.get_model(app_label='file_caching', model_name='Cache')
return Cache.objects.get(
defined_storage_name=STORAGE_NAME_DOCUMENT_IMAGE
)
@cached_property
def cache_partition(self):
partition, created = self.cache.partitions.get_or_create(
name='version-{}'.format(self.uuid)
)
return partition
def delete(self, *args, **kwargs):
for page in self.pages.all():
page.delete()
self.file.storage.delete(name=self.file.name)
self.cache_partition.delete()
return super(DocumentVersion, self).delete(*args, **kwargs)
def execute_pre_save_hooks(self):
"""
Helper method to allow checking if new versions are possible from
outside the model. Currently used by the document version upload link
condition.
"""
DocumentVersion._execute_hooks(
hook_list=DocumentVersion._pre_save_hooks, instance=self
)
def exists(self):
"""
Returns a boolean value that indicates if the document's file
exists in storage. Returns True if the document's file is verified to
be in the document storage. This is a diagnostic flag to help users
detect if the storage has desynchronized (ie: Amazon's S3).
"""
return self.file.storage.exists(self.file.name)
def fix_orientation(self):
for page in self.pages.all():
degrees = page.detect_orientation()
if degrees:
layer_saved_transformations.add_transformation_to(
obj=page, transformation_class=TransformationRotate,
arguments='{{"degrees": {}}}'.format(360 - degrees)
)
def get_absolute_url(self):
return reverse(
viewname='documents:document_version_view', kwargs={
'document_version_id': self.pk
}
)
def get_api_image_url(self, *args, **kwargs):
first_page = self.pages_valid.first()
if first_page:
return first_page.get_api_image_url(*args, **kwargs)
def get_intermediate_file(self):
cache_filename = 'intermediate_file'
cache_file = self.cache_partition.get_file(filename=cache_filename)
if cache_file:
            logger.debug('Intermediate file found.')
return cache_file.open()
else:
            logger.debug('Intermediate file not found.')
try:
with self.open() as version_file_object:
converter = ConverterBase.get_converter_class()(
file_object=version_file_object
)
with converter.to_pdf() as pdf_file_object:
with self.cache_partition.create_file(filename=cache_filename) as file_object:
shutil.copyfileobj(
fsrc=pdf_file_object, fdst=file_object
)
return self.cache_partition.get_file(filename=cache_filename).open()
except InvalidOfficeFormat:
return self.open()
except Exception as exception:
logger.error(
'Error creating intermediate file "%s"; %s.',
cache_filename, exception
)
cache_file = self.cache_partition.get_file(filename=cache_filename)
if cache_file:
cache_file.delete()
raise
def get_rendered_string(self, preserve_extension=False):
if preserve_extension:
filename, extension = os.path.splitext(self.document.label)
return '{} ({}){}'.format(
filename, self.get_rendered_timestamp(), extension
)
else:
return Template(
template_string='{{ instance.document }} - {{ instance.timestamp }}'
).render(context={'instance': self})
def get_rendered_timestamp(self):
return Template(
template_string='{{ instance.timestamp }}'
).render(
context={'instance': self}
)
def natural_key(self):
return (self.checksum, self.document.natural_key())
natural_key.dependencies = ['documents.Document']
@property
def is_in_trash(self):
return self.document.is_in_trash
def open(self, raw=False):
"""
Return a file descriptor to a document version's file irrespective of
the storage backend
"""
if raw:
return self.file.storage.open(name=self.file.name)
else:
file_object = self.file.storage.open(name=self.file.name)
result = DocumentVersion._execute_hooks(
hook_list=DocumentVersion._pre_open_hooks,
instance=self, file_object=file_object
)
if result:
return result['file_object']
else:
return file_object
@property
def page_count(self):
"""
        The number of pages that the document possesses.
"""
return self.pages.count()
@property
def pages(self):
DocumentPage = apps.get_model(
app_label='documents', model_name='DocumentPage'
)
queryset = ModelQueryFields.get(model=DocumentPage).get_queryset()
return queryset.filter(pk__in=self.version_pages.all())
@property
def pages_valid(self):
DocumentPage = apps.get_model(
app_label='documents', model_name='DocumentPage'
)
return self.pages.filter(pk__in=DocumentPage.valid.filter(document_version=self))
def revert(self, _user=None):
"""
Delete the subsequent versions after this one
"""
logger.info(
            'Reverting document: %s to version: %s',
self.document, self
)
with transaction.atomic():
event_document_version_revert.commit(
actor=_user, target=self.document
)
for version in self.document.versions.filter(timestamp__gt=self.timestamp):
version.delete()
def save(self, *args, **kwargs):
"""
Overloaded save method that updates the document version's checksum,
mimetype, and page count when created
"""
user = kwargs.pop('_user', None)
new_document_version = not self.pk
if new_document_version:
logger.info('Creating new version for document: %s', self.document)
try:
with transaction.atomic():
self.execute_pre_save_hooks()
signal_mayan_pre_save.send(
instance=self, sender=DocumentVersion, user=user
)
super(DocumentVersion, self).save(*args, **kwargs)
DocumentVersion._execute_hooks(
hook_list=DocumentVersion._post_save_hooks,
instance=self
)
if new_document_version:
# Only do this for new documents
self.update_checksum(save=False)
self.update_mimetype(save=False)
self.save()
self.update_page_count(save=False)
if setting_fix_orientation.value:
self.fix_orientation()
logger.info(
'New document version "%s" created for document: %s',
self, self.document
)
self.document.is_stub = False
if not self.document.label:
self.document.label = force_text(s=self.file)
self.document.save(_commit_events=False)
except Exception as exception:
logger.error(
'Error creating new document version for document "%s"; %s',
self.document, exception
)
raise
else:
if new_document_version:
event_document_version_new.commit(
actor=user, target=self, action_object=self.document
)
signal_post_version_upload.send(
sender=DocumentVersion, instance=self
)
if tuple(self.document.versions.all()) == (self,):
signal_post_document_created.send(
instance=self.document, sender=Document
)
def save_to_file(self, file_object):
"""
Save a copy of the document from the document storage backend
to the local filesystem
"""
with self.open() as input_file_object:
shutil.copyfileobj(fsrc=input_file_object, fdst=file_object)
@property
def size(self):
if self.exists():
return self.file.storage.size(self.file.name)
else:
return None
def update_checksum(self, save=True):
"""
Open a document version's file and update the checksum field using
the user provided checksum function
"""
block_size = setting_hash_block_size.value
if block_size == 0:
# If the setting value is 0 that means disable read limit. To disable
# the read limit passing None won't work, we pass -1 instead as per
# the Python documentation.
# https://docs.python.org/2/tutorial/inputoutput.html#methods-of-file-objects
block_size = -1
if self.exists():
hash_object = hash_function()
with self.open() as file_object:
while (True):
data = file_object.read(block_size)
if not data:
break
hash_object.update(data)
self.checksum = force_text(s=hash_object.hexdigest())
if save:
self.save()
return self.checksum
def update_mimetype(self, save=True):
"""
        Read a document version's file and determine the mimetype by calling
the get_mimetype wrapper
"""
if self.exists():
try:
with self.open() as file_object:
self.mimetype, self.encoding = get_mimetype(
file_object=file_object
)
except Exception:
self.mimetype = ''
self.encoding = ''
finally:
if save:
self.save()
def update_page_count(self, save=True):
try:
with self.open() as file_object:
converter = ConverterBase.get_converter_class()(
file_object=file_object, mime_type=self.mimetype
)
detected_pages = converter.get_page_count()
except PageCountError:
# If converter backend doesn't understand the format,
# use 1 as the total page count
pass
else:
DocumentPage = apps.get_model(
app_label='documents', model_name='DocumentPage'
)
with transaction.atomic():
self.pages.all().delete()
for page_number in range(detected_pages):
DocumentPage.objects.create(
document_version=self, page_number=page_number + 1
)
if save:
self.save()
return detected_pages
@property
def uuid(self):
# Make cache UUID a mix of document UUID, version ID
return '{}-{}'.format(self.document.uuid, self.pk)
|
the-stack_106_21446
|
""" Main Window """
import cv2
import time
import os
import copy
import locale
import threading
import funscript_editor.utils.logging as logging
import shutil
import webbrowser
import platform
import numpy as np
from PyQt5 import QtGui, QtCore ,QtWidgets
from datetime import datetime
from funscript_editor.ui.funscript_editor_view import Ui_MainWindow
from funscript_editor.ui.video_player import VideoPlayer
from funscript_editor.ui.funscript_visualizer import FunscriptVisualizer
from funscript_editor.data.funscript import Funscript
from funscript_editor.utils.config import UI_CONFIG, VERSION, SETTINGS
from funscript_editor.definitions import APP_DOCUMENTATION_DIR
from funscript_editor.data.ffmpegstream import FFmpegStream
from funscript_editor.ui.funscript_generator_window import FunscriptGeneratorWindow
from funscript_editor.ui.theme import setup_theme
import funscript_editor.definitions as definitions
class FunscriptEditorWindow(QtWidgets.QMainWindow):
""" Funscript Editor window """
def __init__(self):
super(FunscriptEditorWindow, self).__init__()
setup_theme()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.__setup_video_player()
self.__setup_funscript_visualizer()
self.__setup_layout()
self.__setup_ui_binding()
self.__setup_modul_bindings()
self.__setup_shortcut_bindings()
self.__setup_variables()
self.__setup_autosave_timer()
self.setMouseTracking(True)
if os.path.exists(definitions.ICON_PATH):
self.setWindowIcon(QtGui.QIcon(definitions.ICON_PATH))
__generateFunscript = QtCore.pyqtSignal()
__openVideo = QtCore.pyqtSignal()
__logger = logging.getLogger(__name__)
def mouseMoveEvent(self, event):
""" Track the mouse in Qt Window """
        # On Windows the embedded mpv player does not get the mouse events
if False:
if platform.system() == 'Windows':
self.video_player.send_mouse_pos(
event.x() - self.ui.videoPane.x(),
event.y() - self.ui.videoPane.y() - self.ui.menubar.height())
def closeEvent(self, event):
""" Implement for the Qt closeEvent handler """
self.__save_funscript()
def keyPressEvent(self, event):
""" Implement for the Qt keyPressEvent handler """
super(FunscriptEditorWindow, self).keyPressEvent(event)
if event.key() == QtCore.Qt.Key_Delete: self.__delete_current_action()
if event.key() == QtCore.Qt.Key_Space: self.video_player.toggle_play_pause()
if event.key() == QtCore.Qt.Key_P: self.video_player.toggle_play_pause()
if event.key() == QtCore.Qt.Key_Comma: self.video_player.show_prev_frame()
if event.key() == QtCore.Qt.Key_Period: self.video_player.show_next_frame()
if event.key() == QtCore.Qt.Key_BracketLeft: self.video_player.decrease_speed()
if event.key() == QtCore.Qt.Key_BracketRight: self.video_player.increase_speed()
if event.key() == QtCore.Qt.Key_W: self.video_player.move_stroke_indicator_relative(0,-1)
if event.key() == QtCore.Qt.Key_S: self.video_player.move_stroke_indicator_relative(0,+1)
if event.key() == QtCore.Qt.Key_A: self.video_player.move_stroke_indicator_relative(-1,0)
if event.key() == QtCore.Qt.Key_D: self.video_player.move_stroke_indicator_relative(+1,0)
if event.key() == QtCore.Qt.Key_End: self.video_player.seek_time(self.video_player.get_duration)
if event.key() == QtCore.Qt.Key_Home: self.video_player.seek_time(0)
def __setup_variables(self):
self.funscript = None
def __setup_autosave_timer(self):
self.autosave_timer = QtCore.QTimer(self)
self.autosave_timer.setInterval(UI_CONFIG['autosave_in_sec'] * 1000)
self.autosave_timer.timeout.connect(self.__save_funscript)
self.autosave_timer.start()
def __setup_video_player(self):
self.video_player = VideoPlayer(self.ui.videoPane, key_callback=self.__video_player_on_key_press)
self.video_player.start()
def __setup_funscript_visualizer(self):
self.funscript_visualizer = FunscriptVisualizer(self.ui.animationPane)
self.funscript_visualizer.start()
def __setup_layout(self):
self.ui.splitterHorizontal.setStretchFactor(0, UI_CONFIG['horizontal_stretch_factor'][0])
self.ui.splitterHorizontal.setStretchFactor(1, UI_CONFIG['horizontal_stretch_factor'][1])
self.ui.splitterVertical.setStretchFactor(0, UI_CONFIG['vertical_stretch_factor'][0])
self.ui.splitterVertical.setStretchFactor(1, UI_CONFIG['vertical_stretch_factor'][1])
def __setup_modul_bindings(self):
self.funscript_visualizer.set_select_point_callback(self.video_player.seek_frame)
self.video_player.set_frame_changed_callback(self.__frame_changed_handler)
def __frame_changed_handler(self, frame_num):
if not self.ui.seekBar.isSliderDown():
self.ui.seekBar.setValue(frame_num)
self.funscript_visualizer.set_frame(frame_num)
if self.funscript is None: return
self.ui.currentStrokeLabel.setText('{} ms'.format(\
self.funscript.get_stroke_time(self.video_player.get_current_timestamp_in_millis)))
self.ui.fastestStrokeLabel.setText('{} ms'.format(self.funscript.get_fastest_stroke()))
self.ui.slowstStrokeLabel.setText('{} ms'.format(self.funscript.get_slowest_stroke()))
self.ui.medianStrokesLabel.setText('{} ms'.format(self.funscript.get_median_stroke()))
self.ui.timestamp.setText(FFmpegStream.millisec_to_timestamp(self.video_player.get_current_timestamp_in_millis)+' ')
def __setup_ui_binding(self):
self.ui.menuFile.addAction('Open (Ctrl+O)', self.__open_video)
self.ui.menuFile.addAction('New (Ctrl+N)', self.__new_funscript)
self.ui.menuFile.addAction('Save (Ctrl+S)', self.__save_funscript)
self.ui.menuFile.addAction('Clear History (Ctrl+C)', self.__clear_funscript_history)
self.ui.menuFile.addAction('Close Video (Ctrl+Q)', self.__close_video)
helpMenu = self.ui.menubar.addMenu("Help")
        # TODO: we should use an http server to show the documentation
helpMenu.addAction("App Documentation", self.__open_docs)
# helpMenu.addAction("Code Documentation", lambda : webbrowser.open(os.path.join(CODE_DOCUMENTATION_DIR, 'index.html')))
helpMenu.addAction(str('Version '+VERSION))
self.ui.seekBar.sliderReleased.connect(lambda: self.video_player.seek_frame(self.ui.seekBar.value()))
self.__openVideo.connect(self.__open_video)
self.__generateFunscript.connect(self.__generate_funscript)
def __open_docs(self):
app_docs = os.path.join(APP_DOCUMENTATION_DIR, 'index.html')
self.__logger.info("Open Application Documentation: %s", app_docs)
browser = webbrowser.get()
browser.open_new(app_docs)
def __setup_shortcut_bindings(self):
QtWidgets.QShortcut('CTRL+Left', self).activated.connect(self.__seek_prev)
QtWidgets.QShortcut('CTRL+Right', self).activated.connect(self.__seek_next)
QtWidgets.QShortcut('CTRL+Shift+Right', self).activated.connect(self.__seek_to_next_action)
QtWidgets.QShortcut('CTRL+Shift+Left', self).activated.connect(self.__seek_to_prev_action)
QtWidgets.QShortcut('CTRL++', self).activated.connect(self.video_player.inc_stroke_indicator)
QtWidgets.QShortcut('CTRL+-', self).activated.connect(self.video_player.dec_stroke_indicator)
QtWidgets.QShortcut('Shift+End', self).activated.connect(self.__seek_to_last_action)
QtWidgets.QShortcut('Shift+Home', self).activated.connect(self.__seek_to_first_action)
QtWidgets.QShortcut('0', self).activated.connect(lambda: self.__add_action(0))
QtWidgets.QShortcut('1', self).activated.connect(lambda: self.__add_action(10))
QtWidgets.QShortcut('2', self).activated.connect(lambda: self.__add_action(20))
QtWidgets.QShortcut('3', self).activated.connect(lambda: self.__add_action(30))
QtWidgets.QShortcut('4', self).activated.connect(lambda: self.__add_action(40))
QtWidgets.QShortcut('5', self).activated.connect(lambda: self.__add_action(50))
QtWidgets.QShortcut('6', self).activated.connect(lambda: self.__add_action(60))
QtWidgets.QShortcut('7', self).activated.connect(lambda: self.__add_action(70))
QtWidgets.QShortcut('8', self).activated.connect(lambda: self.__add_action(80))
QtWidgets.QShortcut('9', self).activated.connect(lambda: self.__add_action(90))
QtWidgets.QShortcut('CTRL+o', self).activated.connect(self.__open_video)
QtWidgets.QShortcut('CTRL+c', self).activated.connect(self.__clear_funscript_history)
QtWidgets.QShortcut('CTRL+g', self).activated.connect(lambda: self.__generateFunscript.emit())
QtWidgets.QShortcut('CTRL+i', self).activated.connect(self.video_player.toogle_stroke_indicator_inversion)
QtWidgets.QShortcut('CTRL+s', self).activated.connect(self.__save_funscript)
QtWidgets.QShortcut('CTRL+Shift+Delete', self).activated.connect(self.__delete_folowing)
QtWidgets.QShortcut('CTRL+n', self).activated.connect(self.__new_funscript)
QtWidgets.QShortcut('CTRL+q', self).activated.connect(self.__close_video)
def __delete_folowing(self):
        reply = QtWidgets.QMessageBox.question(None, 'Delete Following Actions', 'Delete all following actions? ',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
self.statusBar().showMessage("Delete folowing actions ({})".format(datetime.now().strftime("%H:%M:%S")))
if self.funscript is None: return
self.funscript.delete_folowing_actions(self.video_player.get_current_timestamp_in_millis)
self.funscript_visualizer.update()
def __clear_funscript_history(self):
if self.funscript is None: return
if self.video_player is None: return
if self.video_player.get_video_file is None: return
self.__save_funscript()
funscript_path = ''.join(self.video_player.get_video_file[:-4]) + '.funscript'
num = 0
while (os.path.exists(funscript_path + str(num))):
os.remove(funscript_path + str(num))
num += 1
self.video_player.show_message("The funscript history was cleaned")
def __seek_to_first_action(self):
if self.funscript is not None:
self.video_player.seek_time(self.funscript.get_first_action_time() / float(1000))
def __seek_to_last_action(self):
if self.funscript is not None:
self.video_player.seek_time(self.funscript.get_last_action_time() / float(1000))
def __seek_to_prev_action(self):
if self.funscript is not None:
self.video_player.seek_time( self.funscript.get_prev_action(
self.video_player.get_current_timestamp_in_millis)['at'] / float(1000))
def __seek_to_next_action(self):
if self.funscript is not None:
self.video_player.seek_time( self.funscript.get_next_action(
self.video_player.get_current_timestamp_in_millis)['at'] / float(1000))
def __seek_next(self):
self.video_player.seek_time(\
self.video_player.get_current_timestamp_in_millis / float(1000) + UI_CONFIG['seek_next_sec'])
def __seek_prev(self):
self.video_player.seek_time(\
max((0.0, self.video_player.get_current_timestamp_in_millis \
/ float(1000) - UI_CONFIG['seek_prev_sec'])))
def __close_video(self):
self.video_player.close()
self.funscript_visualizer.clear()
def __invert_actions(self):
if self.funscript is None: return
self.funscript.invert_actions()
self.funscript_visualizer.update()
self.funscript = None
def __video_player_on_key_press(self, key):
if key == 'ctrl+g': self.__generateFunscript.emit()
if key == 'ctrl+s': self.__save_funscript()
if key == 'ctrl+right': self.__seek_next()
if key == 'ctrl+left': self.__seek_prev()
if key == 'ctrl+shift+right': self.__seek_to_next_action()
if key == 'ctrl+shift+left': self.__seek_to_prev_action()
if key == '0': self.__add_action(0)
if key == '1': self.__add_action(10)
if key == '2': self.__add_action(20)
if key == '3': self.__add_action(30)
if key == '4': self.__add_action(40)
if key == '5': self.__add_action(50)
if key == '6': self.__add_action(60)
if key == '7': self.__add_action(70)
if key == '8': self.__add_action(80)
if key == '9': self.__add_action(90)
if key == 'delete': self.__delete_current_action()
if key == 'shift+end': self.__seek_to_last_action()
if key == 'shift+home': self.__seek_to_first_action()
if key == 'ctrl+n': self.__new_funscript()
if key == 'ctrl+o': self.__openVideo.emit()
if key == 'ctrl+c': self.__clear_funscript_history()
if key == 'ctrl+q': self.__close_video()
def __show_message(self, message):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(message+' ')
msg.setWindowTitle("Funscript Editor Info")
msg.exec_()
def __save_funscript(self):
if self.funscript is None: return
if not self.funscript.changed: return
self.funscript.save(''.join(self.video_player.get_video_file[:-4]) + '.funscript')
self.statusBar().showMessage("Funscript Saved ({})".format(datetime.now().strftime("%H:%M:%S")))
def __add_action(self, value):
if self.funscript is None: return
self.funscript.add_action(value, self.video_player.get_current_timestamp_in_millis)
self.funscript_visualizer.update()
def __open_video(self):
file_name = QtWidgets.QFileDialog.getOpenFileName(
None,
caption="Select Video File",
options=QtWidgets.QFileDialog.DontUseNativeDialog # required to get correct path in flatpak
)
if len(file_name) < 1: return
if not any(file_name[0].lower().endswith(x) for x in ['.mkv', '.mp4']): return
self.video_player.open(file_name[0])
funscript_file = ''.join(file_name[0][:-4]) + '.funscript'
if os.path.exists(funscript_file):
self.funscript, file_path = Funscript.load(file_name[0], funscript_file)
self.video_player.show_message('load: ' + str(file_path.split('.')[-1]))
else:
self.funscript = Funscript(fps=self.video_player.get_fps)
self.video_player.set_funscript(self.funscript)
self.funscript_visualizer.set_funscript(self.funscript)
self.ui.seekBar.setMaximum(max((0, self.video_player.get_length-2)))
self.ui.seekBar.setValue(0)
self.ui.timestamp.setText(FFmpegStream.millisec_to_timestamp(0)+' ')
self.__logger.info("Load Video: %s (%s)" , file_name[0], "True" if os.path.exists(file_name[0]) else "False")
def __new_funscript(self):
self.funscript = Funscript(fps=self.video_player.get_fps)
self.video_player.set_funscript(self.funscript)
self.funscript_visualizer.set_funscript(self.funscript)
def __generate_funscript(self):
if self.funscript is None: return
if self.video_player is None: return
if self.video_player.get_video_file is None: return
        # self.video_player.set_indicate_bussy(True) # we have no event if the user aborts, so for now we disable the busy indicator
start_time = self.video_player.get_current_timestamp_in_millis
next_action = self.funscript.get_next_action(self.video_player.get_current_timestamp_in_millis+100)
end_time = next_action['at'] if next_action['at'] > self.video_player.get_current_timestamp_in_millis+100 else -1.0
self.funscript_generator_window = FunscriptGeneratorWindow(
self.video_player.get_video_file,
start_time,
end_time,
self.funscript
)
self.funscript_generator_window.funscriptCompleted.connect(self.__funscript_generated)
def __funscript_generated(self, funscript, status, success):
self.video_player.set_funscript(self.funscript)
self.funscript_visualizer.set_funscript(self.funscript)
self.video_player.set_indicate_bussy(False)
self.__save_funscript()
self.video_player.show_message(status)
if success:
self.__logger.info("Completed: " + status)
else:
self.__logger.error("Failed: " + status)
def __delete_current_action(self):
if self.funscript is None: return
self.funscript.delete_action(self.video_player.get_current_timestamp_in_millis)
self.funscript_visualizer.update()
|
the-stack_106_21447
|
import fnmatch
import os
import argparse
import sys
import math
import svgwrite #conda install svgwrite
import random
import cairo
import cairosvg
import re
from datetime import date, datetime, timedelta
# WIDTH = 1500
# HEIGHT = 1000
# LEGENDWIDTH = 220
# MARGIN = 60
# LABELSHIFT = 15
# FONTSIZE = 26
class Clade:
def __init__(self, name, parent_name):
self.name = name
self.parent_name = parent_name
self.cladeSnapshot_time_d = {}
self.y1_d = {}
self.y2_d = {}
class Snapshot:
def __init__(self, time, date):
self.time = time
self.date = date
self.label = "t_" + str(time)
self.cladeSnapshot_clade_d = {}
self.sumAll = 0
class CladeSnapshot:
def __init__(self, clade, snapshot, abundance):
self.clade = clade
self.snapshot = snapshot
self.abundance = abundance
self.sumDescendant = 0
#self.numChildren = len(self.clade.children)
#self.numChildren = 0
#to be added later sumDescendant , numChildren
def sumUpDescendants(self):
summed = 0
for child in self.clade.children:
child_tSnap = child.cladeSnapshot_time_d[self.snapshot.time]
if child.children_count > 0:
summed += child_tSnap.sumUpDescendants()
summed += child_tSnap.abundance
return summed
def makeColor():
# random_number = random. randint(0,16777215)
# color = "#" + str(hex(random_number))[2:]
color = "#{:06x}".format(random.randint(0, 0xFFFFFF))
return color
def timeToX(time, scaleTime, minTime):
return(scaleTime*(int(time) - int(minTime)) + MARGIN)
def textwidth(text, fontsize):
#function copied from http://blog.mathieu-leplatre.info/text-extents-with-python-cairo.html
try:
import cairo
except Exception as e:
        return len(text) * fontsize
surface = cairo.SVGSurface('undefined.svg', 1280, 200)
cr = cairo.Context(surface)
cr.select_font_face('Arial', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
cr.set_font_size(fontsize)
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(text)
return width
def textheight(text, fontsize):
#function based on textwidth copied from http://blog.mathieu-leplatre.info/text-extents-with-python-cairo.html
try:
import cairo
except Exception as e:
        return len(text) * fontsize
surface = cairo.SVGSurface('undefined.svg', 1280, 200)
cr = cairo.Context(surface)
cr.select_font_face('Arial', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
cr.set_font_size(fontsize)
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(text)
return height
# def makeNoScaleCord(abundances_d, childParent_d, timeToDate_d):
# '''
# returns list of shape_l where shape_l is a list cord_l, which is cordinates for a clade, fomated as [(x, y), (x, y), ...] relative to top left
# '''
# shape_l = []
# return shape_l
def drawWrapper(outFolder, outPrefix, root_clades_l, scaleTime, times_l, maxY, minTime, labelPosition, xlabel, timeToDate_d):
#Draw background
outFile = os.path.join(outFolder, outPrefix + ".svg")
outFilePDF = os.path.join(outFolder, outPrefix + ".pdf")
img = svgwrite.Drawing(filename = outFile, size = (str(WIDTH)+"px", str(HEIGHT)+"px"))
img.add(img.polyline(points = [(0,0), (0, HEIGHT), (WIDTH, HEIGHT), (WIDTH, 0)], stroke='black', fill = 'white'))
#img.add(img.text(text = 'Legend', insert = (WIDTH-LEGENDWIDTH, LEGENDWIDTH), font_size=24))
#rightWidth = (WIDTH-(MARGIN + LEGENDWIDTH + 1/scaleTime)) #WIDTH - (MARGIN + LEGENDWIDTH)
rightWidth = timeToX(list(times_l)[-1], scaleTime, minTime)
img.add(img.polyline(points = [(MARGIN,MARGIN), (MARGIN, HEIGHT-MARGIN), (rightWidth, HEIGHT-MARGIN), (rightWidth, MARGIN), (MARGIN,MARGIN)], stroke='black', stroke_width=5, fill="white"))
#draw clades
img, x_labelCord_l, y_labelCord_l, label_l = extractCord_draw(root_clades_l, img, scaleTime, [], [], [], times_l, minTime, labelPosition )
tHeight = textheight("TESTLABEL", FONTSIZE)
#write clade labels
if labelPosition == "Right":
xpos = timeToX(list(times_l)[-1], scaleTime, minTime)+LABELSHIFT
zipped = zip(label_l, y_labelCord_l)
sort_zip = list(sorted(zipped, key = lambda x: x[1]))
tWidth = textwidth(sort_zip[0][0], FONTSIZE)
fontsize = FONTSIZE
while tWidth > LEGENDWIDTH:
fontsize += (-1)
tWidth = textwidth(sort_zip[0][0], fontsize)
ypos = sort_zip[0][1]
img.add(img.text(text = sort_zip[0][0], insert = (xpos, ypos), font_size=fontsize))
for i in range(1, len(sort_zip)):
bottomPrevious = ypos + LABELSHIFT + tHeight
ypos = sort_zip[i][1]
if ypos < bottomPrevious:
ypos = bottomPrevious
tWidth = textwidth(sort_zip[i][0], FONTSIZE)
fontsize = FONTSIZE
while tWidth > (LEGENDWIDTH+MARGIN):
fontsize += (-1)
tWidth = textwidth(sort_zip[i][0], fontsize)
img.add(img.text(text = sort_zip[i][0], insert = (xpos, ypos), font_size=fontsize))
else: #if args.labelPosition == "Start":
for i in range(len(label_l)):
img.add(img.text(text = label_l[i], insert = (x_labelCord_l[i], y_labelCord_l[i]), font_size=FONTSIZE))
    # write y axis labels
topPlot = MARGIN
bottomPlot = HEIGHT-MARGIN
totalHeight = bottomPlot-topPlot
wirteEvery = int(maxY/10)
maxLabel = str(int(maxY))+ " "
tWidth = textwidth(maxLabel, FONTSIZE)
fontsize = FONTSIZE
while tWidth > (MARGIN-LABELSHIFT):
fontsize = fontsize - 1
tWidth = textwidth(maxLabel, fontsize)
for i in range(int(maxY)):
if i % wirteEvery == 0:
xpos = MARGIN - (textwidth(str(i), fontsize) + LABELSHIFT)
ypos = MARGIN + (maxY - i)*totalHeight/maxY
img.add(img.text(text = str(i), insert = (xpos, ypos + (textheight(str(i), FONTSIZE))/2), font_size=fontsize))
img.add(img.line(start = (MARGIN-LABELSHIFT, ypos), end = (MARGIN, ypos), stroke_width=3, stroke = "black"))
#write x axis labels
if xlabel == "bimonthly":
for time in times_l:
stepDate = date.fromisoformat(timeToDate_d[time])
for t in range(timeWindow):
td = timedelta(days=t)
d = stepDate + td
if d.day == 15 or d.day == 1:
label = datetime.strptime(d.isoformat(), "%Y-%m-%d").strftime("%d %b")
img.add(img.text(text = label, insert = (timeToX(time, scaleTime, minTime), HEIGHT-MARGIN+(2*LABELSHIFT)), font_size=FONTSIZE))
img.add(img.line(start = (timeToX(time, scaleTime, minTime), HEIGHT-MARGIN+LABELSHIFT), end = (timeToX(time, scaleTime, minTime), HEIGHT-(MARGIN)), stroke_width=3, stroke = "black"))
else:
if xlabel == "date":
tWidth = textwidth("2021-05-03", FONTSIZE)
else:
tWidth = textwidth("200", FONTSIZE)
numLabels = rightWidth/tWidth
wirteEvery = math.ceil(len(times_l)/numLabels)
for time in times_l:
if int(time) % wirteEvery == 0:
#if int(time) % int(args.XLABFREQ) == 0:
if xlabel == "time":
label = str(time)
else:
label = timeToDate_d[time]
img.add(img.text(text = label, insert = (timeToX(time, scaleTime, minTime), HEIGHT-MARGIN+(2*LABELSHIFT)), font_size=FONTSIZE))
img.add(img.line(start = (timeToX(time, scaleTime, minTime), HEIGHT-MARGIN+LABELSHIFT), end = (timeToX(time, scaleTime, minTime), HEIGHT-(MARGIN)), stroke_width=3, stroke = "black"))
# write title
fontsize = FONTSIZE+6
tHeight = textheight(outPrefix, fontsize)
while tHeight/2 > MARGIN:
fontsize = fontsize - 1
tHeight = textwidth(outPrefix, fontsize)
img.add(img.text(text = outPrefix.replace("_", " "), insert = (MARGIN + LABELSHIFT, MARGIN/2), font_size=fontsize))
img.save()
cairosvg.svg2pdf(file_obj=open(outFile, "rb"), write_to=outFilePDF)
def extractCord_draw(clades_l, img, scaleTime, x_labelCord_l, y_labelCord_l, label_l, times_l, minTime, labelPosition ):
#for snap in allTimes_l:
clade_cord_d = {} #key clade name, value is list of coordinate tuples [... (x2, y2t2), (x1, y2t1), (x1, y1t1), (x2, y1t2) ...]
#clade_col_d = {} #key clade name, value is color
for clade in clades_l:
coordinate_l = []
startAbundance_time = 'NA'
lastAbundance_time = 0
maxAbundance_time = 'NA'
maxAbundance_value = -1
cladeDrawn = False
for time in times_l:
#x = int(time)*scaleTime
if time in clade.y1_d:
y1 = clade.y1_d[time]
y2 = clade.y2_d[time]
coordinate_l = [(timeToX(time, scaleTime, minTime), y1)] + coordinate_l + [(timeToX(time, scaleTime, minTime), y2)]
abundance = clade.cladeSnapshot_time_d[time].abundance/(1.0*clade.cladeSnapshot_time_d[time].snapshot.sumAll)
if abundance > 0:
cladeDrawn = True
if startAbundance_time == 'NA':
startAbundance_time = time
startAbundance_y = (y2-y1)//2 + y1
lastAbundance_time = time
lastAbundance_y = y1
drawAbundance = y2-y1
if maxAbundance_value < drawAbundance: #abundance:
maxAbundance_value = drawAbundance
maxAbundance_time = time
maxAbundance_y = drawAbundance//2 + y1
clade_cord_d[clade.name] = coordinate_l
if cladeDrawn:
if labelPosition == "Right":
x_cord = timeToX(list(times_l)[-1], scaleTime, minTime)+LABELSHIFT
y1_cord = y1+LABELSHIFT
elif labelPosition == "Max":
x_cord = timeToX(maxAbundance_time, scaleTime, minTime) - textwidth(clade.name, FONTSIZE)//2
if x_cord < 0:
x_cord = 0
y1_cord = maxAbundance_y
elif labelPosition == "Start":
x_cord = timeToX(startAbundance_time, scaleTime, minTime)-(textwidth(clade.name, FONTSIZE))
y1_cord = startAbundance_y
if x_cord < 0:
x_cord = 0
elif labelPosition == "End":
x_cord = timeToX(lastAbundance_time, scaleTime, minTime)+LABELSHIFT
if x_cord < 0:
x_cord = 0
y1_cord = lastAbundance_y
else:
print("do not know where to put clade label")
sys.exit(1)
x_labelCord_l.append(x_cord)
y_labelCord_l.append(y1_cord)
label_l.append(clade.name)
img.add(img.polyline(points = coordinate_l, stroke='black', stroke_width=0, fill=clade.color))
img, x_labelCord_l, y_labelCord_l, label_l = extractCord_draw(clade.children, img, scaleTime, x_labelCord_l, y_labelCord_l, label_l, times_l, minTime, labelPosition )
return(img, x_labelCord_l, y_labelCord_l, label_l)
def removeSmallClades(abundances_d, heiarchy_d, minCount):
'''
    removes clades from abundances and hierarchy whose total count is less than minCount
heiarchy_d: key:child clade; value:parent clade
abundances_d: key: week; value: dict of key:clade; value: count
'''
# remove clades with less than minCount
avalibleClades_d = {}
avalibleClades_d['anc'] = 0
avalibleClades_d['NA'] = 0
avalibleClades_s = set()
removedClades_s = set()
for week in abundances_d:
for clade in abundances_d[week]:
if clade not in avalibleClades_d:
avalibleClades_d[clade] = 0
avalibleClades_d[clade] += float(abundances_d[week][clade])
old_heiarchy_d = heiarchy_d.copy()
for clade in old_heiarchy_d: #old_heiarchy_d.keys():
if clade not in avalibleClades_d:
heiarchy_d.pop(clade)
removedClades_s.add(clade)
elif avalibleClades_d[clade] < int(minCount) and clade != 'NA' and clade != 'anc':
heiarchy_d.pop(clade)
removedClades_s.add(clade)
else:
avalibleClades_s.add(clade)
for child in heiarchy_d:
parent = old_heiarchy_d[child]
while parent not in heiarchy_d and parent != 'NA':
newParent = old_heiarchy_d[parent] #heiarchy_d[parent]
parent = newParent
heiarchy_d[child] = parent
    # remove from abundances if removed from hierarchy
old_abundances_d = abundances_d.copy()
for week in list(old_abundances_d):
for clade in list(old_abundances_d[week]):
if clade in removedClades_s:
abundances_d[week].pop(clade)
return(abundances_d, heiarchy_d)
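# Illustrative sketch (defined but never called): how removeSmallClades re-links the
# hierarchy when an intermediate clade falls below the minimum count. The toy clade
# names and counts below are invented purely for demonstration.
def _example_remove_small_clades():
    abundances = {
        '0': {'A': 100, 'A.1': 2, 'A.1.1': 60},
        '1': {'A': 80, 'A.1.1': 40},
    }
    hierarchy = {'A': 'NA', 'A.1': 'A', 'A.1.1': 'A.1'}
    # With minCount=10, clade 'A.1' (total 2) is dropped and 'A.1.1' is re-parented
    # to 'A', so the plot still nests it under its closest surviving ancestor.
    return removeSmallClades(abundances, hierarchy, 10)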
def defineChildBoundries(time, scaleFactor, parentCladeSnap, y1_parent, y2_parent):
'''
time is the number defining the time of interest
    parentCladeSnap is a pointer to the parent CladeSnapshot object whose vertical span is being subdivided
    y1_parent is the top of the clade boundaries, y2_parent is the bottom of the clade boundaries
    updates the y1_d and y2_d boundaries of every child clade (recursively) for this time point
'''
numSections = len(parentCladeSnap.clade.children) + 1
#descendantSpace = (parentCladeSnap.sumDescendant - parentCladeSnap.abundance)
#parentEdgeHeight = scaleFactor*(parentCladeSnap.abundance - descendantSpace)/numSections
parentEdgeHeight = scaleFactor*(parentCladeSnap.abundance)/numSections
acountedHeight = y1_parent
for childClade in parentCladeSnap.clade.children:
childCladeSnap = childClade.cladeSnapshot_time_d[time]
y1 = acountedHeight + parentEdgeHeight
y2 = y1 + (scaleFactor*childCladeSnap.sumDescendant)
acountedHeight += parentEdgeHeight + (scaleFactor*childCladeSnap.sumDescendant)
childClade.y1_d[time] = y1
childClade.y2_d[time] = y2
defineChildBoundries(time, scaleFactor, childCladeSnap, y1, y2)
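# Worked example for the layout above (toy numbers, comments only): a parent snapshot
# with abundance 30 and two children whose sumDescendant values are 40 and 20, drawn
# with scaleFactor 1 between y1_parent=0 and y2_parent=90, gives
#   parentEdgeHeight = 1 * 30 / (2 + 1) = 10
# so the bands are: 10 px of parent edge, child 1 from y=10 to y=50, another 10 px of
# parent edge, child 2 from y=60 to y=80, and the remaining 10 px of parent below.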
def main():
########################## parse user arguments
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--parentHierarchy_name', required=True, type=str, help="csv output from mutationLinages_report.py with child parent col")
parser.add_argument('-a', '--abundance_name', required=True, type=str, help="csv output from mutationLinages_report.py with abundances of clades")
parser.add_argument('-c', '--cases_name', required=False, type=str, help="file with cases - formated with 'date' in ISO format and 'confirmed_rolling' cases, in tsv format")
parser.add_argument("--avgWindow", required=False, type=str, help="width of rolling mean window in terms of --timeWindow's (recomend using with small --timeWindow) ; default: sum of counts withen timeWindow (ie no average)")
    parser.add_argument('-o', '--outFolder', required=True, type=str, help="output folder for the generated plots (svg and pdf files)")
parser.add_argument('-mt', '--MINTIME', required=False, type=str, default="30", help="minimum time point to start plotting")
parser.add_argument('-min', '--MINTOTALCOUNT', required=False, type=str, default="50", help="minimum total count for group to be included")
parser.add_argument('-l', '--xlabel', required=False, type=str, choices = ["date", "time", "bimonthly"], default="date", help="Format of x axis label: ISO date format or timepoints from start, or dd-Mon-YYYY on 1st and 15th")
parser.add_argument('-lp', '--labelPosition', required=False, type=str, default="Right", choices = ["Right", "Max", "Start", "End"], help="choose position of clade labels")
#parser.add_argument("--tmrca", action="store_true", help="draw point at tmrca of clade if flag is used")
drawing_group_page = parser.add_argument_group('Options for page setup')
drawing_group_page.add_argument('--WIDTH', required=False, type=str, default="1500", help="WIDTH of page (px)")
drawing_group_page.add_argument('--HEIGHT', required=False, type=str, default="1000", help="HEIGHT of page (px)")
drawing_group_page.add_argument('--LEGENDWIDTH', required=False, type=str, default="220", help="LEGENDWIDTH to the right of plotting area (px)")
drawing_group_page.add_argument('--LABELSHIFT', required=False, type=str, default="15", help="nudge label over by LABELSHIFT (px)")
drawing_group_page.add_argument('--MARGIN', required=False, type=str, default="60", help="MARGIN around all sides of plotting area (px)")
drawing_group_page.add_argument('--FONTSIZE', required=False, type=str, default="26")
args = parser.parse_args()
########################## set up global
if not os.path.exists(args.outFolder):
os.makedirs(args.outFolder)
global WIDTH
WIDTH = float(args.WIDTH)
global HEIGHT
HEIGHT = float(args.HEIGHT)
global LEGENDWIDTH
LEGENDWIDTH = float(args.LEGENDWIDTH)
global MARGIN
MARGIN = float(args.MARGIN)
global FONTSIZE
FONTSIZE = float(args.FONTSIZE)
global LABELSHIFT
LABELSHIFT = float(args.LABELSHIFT)
if args.labelPosition != "Right":
LEGENDWIDTH = 0
########################## read in files
abundances_file = open(args.abundance_name, "r")
abundances_d = {}
#abundance_total_d {}
timeToDate_d = {}
times_index = 'NA'
for line in abundances_file:
line_l = line.strip().split(",")
if line_l[0] == "names":
times_index = line_l.index("times")
a_index = line_l.index("abundances")
date_index = line_l.index("date")
elif times_index == 'NA':
print("First line of abundances_file must start with 'names' col and contain 'times', 'abundances', and 'date' cols, with no spaces between commas\n")
sys.exit(1)
else:
time = line_l[times_index]
if int(time) >= int(args.MINTIME):
clade = line_l[0]
abundance = line_l[a_index]
if time not in abundances_d:
abundances_d[time] = {}
#if clade not in abundance_total_d:
# abundance_total_d[clade] = 0
#abudance_total_d[clade] += abundance
abundances_d[time][clade] = abundance
if time not in timeToDate_d:
timeToDate_d[time] = line_l[date_index]
abundances_file.close()
times_l = abundances_d.keys()
times_l = list(times_l)
times_l.sort(key=int)
startDay = date.fromisoformat(timeToDate_d[times_l[0]])
endDay = date.fromisoformat(timeToDate_d[times_l[1]])
global timeWindow
timeWindow = (endDay-startDay).days
hierarchy_file = open(args.parentHierarchy_name, "r")
childParent_d = {}
cladeColor_d = {}
hasHeader = False
hasColor = False
for line in hierarchy_file:
line_l = line.strip().split(",")
if line_l[0] == "names" and line_l[1] == "parents":
hasHeader = True
if len(line_l) > 2:
if line_l[2] == "color":
hasColor = True
elif not hasHeader:
print("First line of parentHierarchy_name must have 'names' as first col and 'parents' as second col, sperated with commas and no spaces")
sys.exit(1)
else:
childParent_d[line_l[0]] = line_l[1]
if hasColor:
if line_l[2] != "":
cladeColor_d[line_l[0]] = line_l[2]
else:
cladeColor_d[line_l[0]] = makeColor()
else:
cladeColor_d[line_l[0]] = makeColor()
hierarchy_file.close()
abundances_d, heiarchy_d = removeSmallClades(abundances_d, childParent_d, args.MINTOTALCOUNT)
########################## parse clades
times_l = abundances_d.keys()
times_l = list(times_l)
#timeLabs_s = set()
clades_l = []
root_clades_l = []
for clade in childParent_d:
clade = Clade(clade, childParent_d[clade])
setattr(clade, "color", cladeColor_d[clade.name])
        #TODO make this deterministic for most diverse colors and add in more color options
clades_l.append(clade)
if clade.parent_name == "NA":
root_clades_l.append(clade)
#add pointers to children clade objects
for parent in clades_l:
setattr(parent, "children", [])
for child in clades_l:
if child.parent_name == parent.name:
parent.children = parent.children + [child]
setattr(parent, "children_count", len(parent.children))
#add pointers to parent clade objects
for child in clades_l:
setattr(child, "parent", Clade("NA", "NA"))
for parent in clades_l:
if parent.name == child.parent_name:
setattr(child, "parent", parent)
#rolling average over windows
if args.avgWindow is not None:
avgWindow = args.avgWindow
abudances_times_l = list(abundances_d.keys())
abudances_times_l.sort(key=int)
lastTime_int = int(abudances_times_l[-1])
abundances_roll_d = {}
for time in abudances_times_l:
abundances_roll_d[time] = {}
rollRange = int(avgWindow)
while lastTime_int + 1 < int(time) + rollRange :
rollRange -= 1
for rollTime_delta in range(rollRange):
rollTime = str(int(time) + rollTime_delta)
for clade_o in clades_l:
clade = clade_o.name
if clade in abundances_d[rollTime]:
if clade not in abundances_roll_d[time]:
abundances_roll_d[time][clade] = 0
abundances_roll_d[time][clade] += float(abundances_d[rollTime][clade])/rollRange
abundances_d = abundances_roll_d
#record abundances in objects
allTimes_l = []
minTimeOfAbundance = 'NA'
for time in abundances_d:
if int(time) >= int(args.MINTIME):
if minTimeOfAbundance == 'NA':
minTimeOfAbundance = time
elif int(minTimeOfAbundance) > int(time):
minTimeOfAbundance = time
t = Snapshot(time, timeToDate_d[time])
for clade in clades_l:
if clade.name in abundances_d[time]:
abundance = float(abundances_d[time][clade.name])
else:
abundance = 0
clade_oneTime = CladeSnapshot(clade, t, abundance)
t.cladeSnapshot_clade_d[clade.name] = clade_oneTime
t.sumAll += abundance
clade.cladeSnapshot_time_d[time] = clade_oneTime
if t.sumAll > 0:
allTimes_l.append(t)
else:
times_l.remove(time)
print("No samples during time: ", time)
#calculate Descendants and children
for snap in allTimes_l:
for cladeSnap in snap.cladeSnapshot_clade_d.values():
cladeSnap.sumDescendant = cladeSnap.abundance + cladeSnap.sumUpDescendants()
    # ################################ determine plotting values
topPlot = MARGIN
bottomPlot = HEIGHT-MARGIN
for snap in allTimes_l:
totalHeight = (bottomPlot-topPlot)
scaleFactor = totalHeight/snap.sumAll
descendantSpace = 0
for clade in root_clades_l:
descendantSpace += clade.cladeSnapshot_time_d[snap.time].sumDescendant
numSections = len(root_clades_l) + 1
parentEdgeHeight = scaleFactor*(snap.sumAll - descendantSpace)/numSections
acountedHeight = topPlot
for clade in root_clades_l:
cladeSnap = clade.cladeSnapshot_time_d[snap.time]
y1 = acountedHeight + parentEdgeHeight
y2 = y1 + (scaleFactor*cladeSnap.sumDescendant)
acountedHeight += (scaleFactor*cladeSnap.sumDescendant) + parentEdgeHeight
clade.y1_d[snap.time] = y1
clade.y2_d[snap.time] = y2
defineChildBoundries(snap.time, scaleFactor, cladeSnap, y1, y2)
scaleTime = (WIDTH-(MARGIN+LEGENDWIDTH))/len(times_l)
drawWrapper(args.outFolder, "relative_abundance", root_clades_l, scaleTime, times_l, 100, minTimeOfAbundance, args.labelPosition, args.xlabel, timeToDate_d)
# ########################## make fig with number of samples scaling
topPlot = MARGIN
bottomPlot = HEIGHT-MARGIN
maxCount = 0
for snap in allTimes_l:
if snap.sumAll > maxCount:
maxCount = snap.sumAll
for snap in allTimes_l:
        # adjust relative to scaling
lessThanMax = maxCount - snap.sumAll
lessThanMax_ratio = snap.sumAll/maxCount
totalHeight = (bottomPlot-topPlot)*lessThanMax_ratio
scaleFactor = totalHeight/snap.sumAll
descendantSpace = 0
for clade in root_clades_l:
descendantSpace += clade.cladeSnapshot_time_d[snap.time].sumDescendant
numSections = len(root_clades_l) + 1
parentEdgeHeight = scaleFactor*(snap.sumAll - descendantSpace)/numSections
acountedHeight = topPlot + (scaleFactor*lessThanMax)/2 #do not split in half for plot to be on bottom
for clade in root_clades_l:
cladeSnap = clade.cladeSnapshot_time_d[snap.time]
y1 = acountedHeight + parentEdgeHeight
y2 = y1 + (scaleFactor*cladeSnap.sumDescendant)
acountedHeight += (scaleFactor*cladeSnap.sumDescendant) + parentEdgeHeight
clade.y1_d[snap.time] = y1
clade.y2_d[snap.time] = y2
defineChildBoundries(snap.time, scaleFactor, cladeSnap, y1, y2)
scaleTime = (WIDTH-(MARGIN+LEGENDWIDTH))/len(times_l)
drawWrapper(args.outFolder, "sequence_scaled_lineages", root_clades_l, scaleTime, times_l, maxCount, minTimeOfAbundance, args.labelPosition, args.xlabel, timeToDate_d)
########################## make fig with cases scaling
if args.cases_name is not None:
if "strain" in line_l and "date" in line_l:
strain_index = line_l.index("strain")
date_index = line_l.index("date")
date_index = "na"
case_index = "na"
dateToCase_raw_d = {}
cases_file = open(args.cases_name, "r")
for line in cases_file:
line_l = re.split('\t|,', line.strip())
if date_index == "na":
if "confirmed_rolling" in line_l and "date" in line_l:
date_index = line_l.index("date")
case_index = line_l.index("confirmed_rolling")
elif "cases" in line_l and "date" in line_l:
date_index = line_l.index("date")
case_index = line_l.index("cases")
else:
print("file with cases - formated with 'date' in ISO format and 'confirmed_rolling' cases, in tsv format")
sys.exit(1)
else:
dateToCase_raw_d[line_l[date_index].replace('"', "")] = float(line_l[case_index].replace('"', ""))
dateToCase_d = {}
if args.avgWindow is None:
for k in dateToCase_raw_d.keys():
day = date.fromisoformat(k)
d = day + timedelta(days=timeWindow)
if d.isoformat() in dateToCase_raw_d:
c = 0
for i in range(timeWindow):
d = day + timedelta(days=i)
c += dateToCase_raw_d[d.isoformat()]
dateToCase_d[d.isoformat()] = c/timeWindow
else:
avgWindow = int(args.avgWindow)
totalWindow = timeWindow*avgWindow
for k in dateToCase_raw_d.keys():
day = date.fromisoformat(k)
d = day + timedelta(days=totalWindow)
if d.isoformat() in dateToCase_raw_d:
c = 0
for i in range(totalWindow):
d = day + timedelta(days=i)
c += dateToCase_raw_d[d.isoformat()]
dateToCase_d[d.isoformat()] = c/totalWindow
topPlot = MARGIN
bottomPlot = HEIGHT-MARGIN
maxCount = 0
for snap in allTimes_l:
rollCase = dateToCase_d[timeToDate_d[snap.time]]
if rollCase > maxCount:
maxCount = rollCase
for snap in allTimes_l:
rollCase = dateToCase_d[timeToDate_d[snap.time]]
cases_lessThanMax = maxCount - rollCase
psudoAbundance_lessThanMax = (cases_lessThanMax/rollCase)*snap.sumAll
lessThanMax_ratio = rollCase/maxCount
totalHeight = (bottomPlot-topPlot)*lessThanMax_ratio
scaleFactor = totalHeight/snap.sumAll
descendantSpace = 0
for clade in root_clades_l:
descendantSpace += clade.cladeSnapshot_time_d[snap.time].sumDescendant
numSections = len(root_clades_l) + 1
parentEdgeHeight = scaleFactor*(snap.sumAll - descendantSpace)/numSections
acountedHeight = topPlot + (psudoAbundance_lessThanMax*scaleFactor)/2
for clade in root_clades_l:
cladeSnap = clade.cladeSnapshot_time_d[snap.time]
y1 = acountedHeight + parentEdgeHeight
y2 = y1 + (scaleFactor*cladeSnap.sumDescendant)
acountedHeight += (scaleFactor*cladeSnap.sumDescendant) + parentEdgeHeight
clade.y1_d[snap.time] = y1
clade.y2_d[snap.time] = y2
defineChildBoundries(snap.time, scaleFactor, cladeSnap, y1, y2)
scaleTime = (WIDTH-(MARGIN+LEGENDWIDTH))/len(times_l)
drawWrapper(args.outFolder, "case_scaled_lineages", root_clades_l, scaleTime, times_l, maxCount, minTimeOfAbundance, args.labelPosition, args.xlabel, timeToDate_d)
else:
print("No case data supplied - skipping case scaled plot")
if __name__ == "__main__":
main()
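# Example invocation (hypothetical file names, shown for illustration only):
#   python lineage_plots.py -p lineage_hierarchy.csv -a lineage_abundances.csv \
#       -o plots_out --MINTOTALCOUNT 50 --labelPosition Right --xlabel date
# This writes relative_abundance.svg/.pdf and sequence_scaled_lineages.svg/.pdf into
# plots_out, plus case_scaled_lineages.svg/.pdf when --cases_name is supplied.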
|
the-stack_106_21449
|
class Solution:
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums = sorted(nums)
res = 2**32-1
res_list = []
for i in range(len(nums)):
new_target = target-nums[i]
new_nums = nums[:i]+nums[i+1:]
left = 0
right = len(new_nums)-1
while left<right:
if abs(new_nums[left]+new_nums[right]-new_target)<res:
res = abs(new_nums[left]+new_nums[right]-new_target)
res_list = [nums[i], new_nums[left], new_nums[right]]
if new_nums[left]+new_nums[right] < new_target:
left += 1
elif new_nums[left]+new_nums[right] > new_target:
right -= 1
else:
return sum([nums[i], new_nums[left], new_nums[right]])
return sum(res_list)
# Exercise 20200229
class Solution(object):
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) < 3:
return None
nums = sorted(nums)
res = nums[0]+nums[1]+nums[2]
for i in range(len(nums)):
cur_sum = nums[i]+self.twoSumClosest(nums[:i]+nums[i+1:], target-nums[i])
if abs(cur_sum-target) < abs(res-target):
res = cur_sum
print(nums[i], cur_sum)
return res
def twoSumClosest(self, nums, target):
left = 0
right = len(nums)-1
res_sum = nums[0]+nums[-1]
gap = abs(res_sum-target)
while left < right:
if nums[left]+nums[right] == target:
return target
else:
cur_gap = abs(nums[left]+nums[right] - target)
if cur_gap < gap:
res_sum = nums[left]+nums[right]
gap = cur_gap
if nums[left]+nums[right] < target:
left += 1
else:
right -= 1
return res_sum
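# Quick sanity check (a minimal sketch) for the most recently defined Solution above,
# using the classic example nums=[-1, 2, 1, -4], target=1, whose closest sum is 2.
if __name__ == "__main__":
    print(Solution().threeSumClosest([-1, 2, 1, -4], 1))  # expected final result: 2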
|
the-stack_106_21451
|
import cv2
import numpy as np
import pandas as pd
image = 'Assets\style-4.png'
face_cascade = cv2.CascadeClassifier("./Dataset/haarcascade_frontalface_alt.xml")
eyecascade = cv2.CascadeClassifier("./Dataset/frontalEyes35x16.xml")
nosecascade = cv2.CascadeClassifier("./Dataset/Nose18x15.xml")
glasses = cv2.imread('./FilterImages/glasses.png', -1)
mustache = cv2.imread('./FilterImages/mustache.png', -1)
frame = cv2.imread(image)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_frame, 1.2, 5)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
for face in faces:
x, y, w, h = face
    rol_gray = gray_frame[y:y+h, x:x+w]
    rol_color = frame[y:y+h, x:x+w]
eyes = eyecascade.detectMultiScale(rol_gray,1.3,3)
for (ex,ey,ew,eh) in eyes:
glasses = cv2.resize(glasses,(ew,eh))
gw, gh, gc = glasses.shape
for i in range(0, gw):
for j in range(0, gh):
if glasses[i,j][3] != 0:
rol_color[ey + i, ex + j] = glasses[i,j]
nose = nosecascade.detectMultiScale(rol_gray, 1.3, 7)
for (nx, ny, nw, nh) in nose:
mustache = cv2.resize(mustache, (nw+10,nh))
mw, mh, mc = mustache.shape
for i in range(0,mw):
for j in range(0, mh):
if mustache[i,j][3] != 0:
rol_color[ny+int(nh/2.0)+i, nx+j+3] = mustache[i,j]
frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
cv2.imshow("window", frame)
frame = frame.reshape(-1, 3)
pd.DataFrame(frame, columns=['Channel 1', 'Channel 2', 'Channel 3']).to_csv('./Prediction.csv', index= False)
cv2.waitKey(0)
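# A vectorized alternative to the per-pixel paste loops above (a sketch, defined but
# not called): it applies the same hard alpha mask with NumPy boolean indexing, which
# is typically much faster than nested Python loops.
def paste_with_alpha(roi_bgra, overlay_bgra, ox, oy):
    oh, ow = overlay_bgra.shape[:2]
    mask = overlay_bgra[:, :, 3] != 0            # pixels where the overlay is opaque
    region = roi_bgra[oy:oy + oh, ox:ox + ow]    # view into the face ROI
    region[mask] = overlay_bgra[mask]            # copy only the opaque overlay pixels
    return roi_bgra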
|
the-stack_106_21452
|
"""
A controller implementing a (too) simple coordinated voltage control algorithm.
"""
import collections
import mosaik_api
from itertools import count
import fmipp
import xml.etree.ElementTree as ETree
import os.path
META = {
'models': {
'TC3Controller': {
'public': True,
'params': ['vlow', 'vup'],
'attrs': ['u3', 'u4', 'tap'],
},
},
}
class TC3Controller(mosaik_api.Simulator):
def __init__(self):
super().__init__(META)
self.data = collections.defaultdict(dict)
self._entities = {}
self.eid_counters = {}
self.is_responsive = {} # controller state regarding dead time
self.wakeup_time = {} # time stamp until end of dead time
self.dead_time = 0 # dead time of controller
self.work_dir = None # directory of FMU
self.model_name = None # model name of FMU
self.instance_name = None # instance name of FMU
self.var_table = None # dict of FMU variables (input, output, parameters)
self.translation_table = None # help dict if variable names cannot be parsed properly in Python
self.logging_on = False # FMI++ parameter
self.time_diff_resolution = 1e-9 # FMI++ parameter
self.timeout = 0 # FMI++ parameter
self.interactive = False # FMI++ parameter
self.visible = False # FMI++ parameter
self.start_time = 0 # FMI++ parameter
self.stop_time = 0 # FMI++ parameter
self.stop_time_defined = False # FMI++ parameter
self.uri_to_extracted_fmu = None
self.sec_per_mt = 1 # Number of seconds of internaltime per mosaiktime
self.fmutimes = {} # Keeping track of each FMU's internal time
self.verbose = False
def init( self, sid, work_dir, model_name, instance_name, dead_time=0, start_time=0, stop_time=0,
logging_on = False, time_diff_resolution=1e-9, timeout=0, interactive=False, visible=False,
stop_time_defined=False, seconds_per_mosaik_timestep=1, var_table=None, translation_table=None,
verbose=False ):
self.dead_time = dead_time / seconds_per_mosaik_timestep
self.work_dir = work_dir
self.model_name = model_name
self.instance_name = instance_name
self.start_time = start_time
self.stop_time = stop_time
self.logging_on = logging_on
self.time_diff_resolution = time_diff_resolution # How close should two events be to be considered equal?
self.timeout = timeout
self.interactive = interactive
self.visible = visible
self.stop_time_defined = stop_time_defined
self.sec_per_mt = seconds_per_mosaik_timestep # Number of seconds of internaltime per mosaiktime (Default: 1, mosaiktime measured in seconds)
self.verbose = verbose
path_to_fmu = os.path.join(self.work_dir, self.model_name + '.fmu')
if self.verbose: print('Attempted to extract FMU {0}, Path {1}'.format(path_to_fmu, self.work_dir))
self.uri_to_extracted_fmu = fmipp.extractFMU(path_to_fmu, self.work_dir)
assert self.uri_to_extracted_fmu is not None
# If no variable table is given by user, parse the modelDescription.xml for a table -
# however, this will not work properly for some FMUs due to varying conventions.
xmlfile = os.path.join( self.work_dir, self.model_name, 'modelDescription.xml' )
if var_table is None:
self.var_table, self.translation_table = self.get_var_table( xmlfile )
else:
self.var_table = var_table
self.translation_table = translation_table
self.adjust_var_table()
return self.meta
def create(self, num, model, vlow=0.95, vup=1.05):
counter = self.eid_counters.get(model, count())
entities = []
for i in range(num):
eid = '%s_%s' % (model, next(counter)) # entity ID
if self.verbose: print('{0}, {1}, {2}, {3}'.format(self.uri_to_extracted_fmu, self.model_name, self.logging_on, self.time_diff_resolution))
fmu = fmipp.FMUCoSimulationV1( self.uri_to_extracted_fmu, self.model_name,
self.logging_on, self.time_diff_resolution )
self._entities[eid] = fmu
status = self._entities[eid].instantiate( self.instance_name, self.timeout,
self.visible, self.interactive )
assert status == fmipp.fmiOK
status = self._entities[eid].initialize( self.start_time*self.sec_per_mt,
self.stop_time_defined, self.stop_time*self.sec_per_mt )
assert status == fmipp.fmiOK
self.data[eid] = { 'tap': 0 }
self.set_values( eid, { 'u3': 1., 'u4': 1., 'vlow': vlow, 'vup': vup }, 'input' )
self.is_responsive[eid] = True
self.wakeup_time[eid] = None
# Handling tracking internal fmu times
self.fmutimes[eid] = self.start_time*self.sec_per_mt
entities.append( { 'eid': eid, 'type': model, 'rel': [] } )
return entities
def step(self, time, inputs):
#print( 'CONTROLLER called at t = {}, inputs = {}'.format( time, inputs ) )
# This is the internal time.
target_time = ( time + self.start_time )*self.sec_per_mt
for eid, fmu in self._entities.items():
status = fmu.doStep( self.fmutimes[eid], target_time - self.fmutimes[eid], True )
assert status == fmipp.fmiOK
self.fmutimes[eid] += target_time - self.fmutimes[eid]
for eid, edata in self.data.items():
input_data = inputs.get(eid, {})
[ ( _, u3 ) ] = input_data['u3'].items() if 'u3' in input_data else [ ( None, None ) ]
[ ( _, u4 ) ] = input_data['u4'].items() if 'u4' in input_data else [ ( None, None ) ]
if True is self.is_responsive[eid]: # Controller is responsive.
if u3 is not None or u4 is not None:
new_tap = self.decide_on_tap(eid, u3, u4)
edata['tap'] = new_tap
if self.verbose: print( "Decided on tap {} at time {}".format( new_tap, time ) )
# Enter dead time.
self.is_responsive[eid] = False
self.wakeup_time[eid] = time + self.dead_time
else:
edata['tap'] = None # No inputs --> no output.
else: # Controller is not responsive (dead time).
if time >= self.wakeup_time[eid]:
self.wakeup_time[eid] = None
self.is_responsive[eid] = True
return time + 1
def decide_on_tap( self, eid, u3, u4 ):
fmu_inputs = {}
if u3 is not None: fmu_inputs['u3'] = u3
if u4 is not None: fmu_inputs['u4'] = u4
self.set_values( eid, fmu_inputs, 'input' )
status = self._entities[eid].doStep( self.fmutimes[eid], 0, True )
assert status == fmipp.fmiOK
return self.get_value( eid, 'tap' )
def get_data(self, outputs):
data = {}
for eid, edata in self.data.items():
requests = outputs[eid]
mydata = {}
for attr in requests:
try:
mydata[attr] = edata[attr] if self.is_responsive[eid] is True else None
except KeyError:
raise RuntimeError("OLTC controller has no attribute {0}".format(attr))
data[eid] = mydata
return data
def get_var_table( self, filename ):
var_table = {}
translation_table = {}
base = ETree.parse(filename).getroot()
mvars = base.find('ModelVariables')
for var in mvars.findall('ScalarVariable'):
causality = var.get('causality')
name = var.get('name')
if causality in ['input', 'output', 'parameter']:
var_table.setdefault(causality, {})
translation_table.setdefault(causality, {})
# Variable names including '.' cannot be used in Python scripts - they get aliases with '_':
if '.' in name:
alt_name = name.replace('.', '_')
else:
alt_name = name
translation_table[causality][alt_name] = name
# Store variable type information:
                specs = list(var)  # Element.getchildren() was removed in Python 3.9
for spec in specs:
if spec.tag in ['Real', 'Integer', 'Boolean', 'String']:
var_table[causality][name] = spec.tag
continue
return var_table, translation_table
def adjust_var_table(self):
'''Helper function that adds missing keys to the var_table and its associated translation table.
Avoids errors due to faulty access later on.'''
self.var_table.setdefault('parameter', {})
self.var_table.setdefault('input', {})
self.var_table.setdefault('output', {})
self.translation_table.setdefault('parameter', {})
self.translation_table.setdefault('input', {})
self.translation_table.setdefault('output', {})
def set_values(self, eid, val_dict, var_type):
'''Helper function to set input variable and parameter values to a FMU instance'''
for alt_name, val in val_dict.items():
name = self.translation_table[var_type][alt_name]
# Obtain setter function according to specified var type (Real, Integer, etc.):
set_func = getattr(self._entities[eid], 'set' + self.var_table[var_type][name] + 'Value')
set_stat = set_func(name, val)
assert set_stat == fmipp.fmiOK
def get_value(self, eid, alt_attr):
'''Helper function to get output variable values from a FMU instance.'''
attr = self.translation_table['output'][alt_attr]
# Obtain getter function according to specified var type (Real, Integer, etc.):
get_func = getattr(self._entities[eid], 'get' + self.var_table['output'][attr] + 'Value')
val = get_func(attr)
#if val is not 0: print( 'get_value attr = {}, val = {}'.format( attr, val ) )
return val
if __name__ == '__main__':
mosaik_api.start_simulation(TC3Controller())
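# Hypothetical mosaik scenario sketch (defined but never executed here): how this
# simulator is typically started and wired up. The module path, FMU directory and the
# commented-out grid entities are placeholders, not anything defined in this file.
def _example_scenario():
    import mosaik
    sim_config = {'TC3Ctrl': {'python': 'your_package.controller:TC3Controller'}}  # adjust path
    world = mosaik.World(sim_config)
    ctrl_sim = world.start('TC3Ctrl', work_dir='fmus', model_name='TC3Controller',
                           instance_name='ctrl1', dead_time=30, stop_time=86400)
    ctrl = ctrl_sim.TC3Controller(vlow=0.95, vup=1.05)
    # world.connect(grid_node, ctrl, ('u3', 'u3'), ('u4', 'u4'))
    # world.connect(ctrl, transformer, ('tap', 'tap'), time_shifted=True, initial_data={'tap': 0})
    world.run(until=86400)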
|
the-stack_106_21453
|
#!/usr/bin/env python
DESCRIPTION = '''
order_columns - Takes an ORDERED list of column names (one per line) to return from input file.
Columns not in input list will not be in output.
First column name in input list will be the first column, second column name in input list will be the second column, etc
NOTE:
- Ignore comment ('#') and blank lines
- Assume first non-comment line has column headers.
- Uses exact string matching. Can not do regex or partial matching.
'''
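# Worked example (hypothetical files): given an input whose header line is
#   sample<TAB>depth<TAB>coverage<TAB>gene
# and an ordered-column file containing the two lines "gene" then "sample",
# the output keeps only those columns, in that order:
#   gene<TAB>sample
# Invocation sketch: python order_columns.py -i data.txt -f ordered_col_names.txt -o out.txt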
import sys
import argparse
import logging
VERSION=0.1
## Pass arguments.
def main():
# Pass command line arguments.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=DESCRIPTION)
parser.add_argument('-i', '--input', metavar='data_to_filter.txt', default=sys.stdin, type=argparse.FileType('r'), required=False, help='Input file to filter (default: stdin)')
parser.add_argument('-o', '--output', metavar='data_filtered.txt', default=sys.stdout, type=argparse.FileType('w'), required=False, help='Output file of ordered columns (default: stdout)')
parser.add_argument('-f', '--file', metavar='ordered_col_names.txt', type=argparse.FileType('r'), required=True, help='Ordered column names to keep/reorder (required)')
parser.add_argument('--delim', default='\t', type=str, required=False, help='Delimiter for --input (default: \\t)')
parser.add_argument('--debug', action='store_true', required=False, help='Print DEBUG info (default: %(default)s)')
args = parser.parse_args()
# Set up basic debugger
logFormat = "[%(levelname)s]: %(message)s"
logging.basicConfig(format=logFormat, stream=sys.stderr, level=logging.INFO)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
logging.debug('%s', args) ## DEBUG
ordered_col_names = load_id_list(args.file) # Load column names/ids
# For each line in input file
is_first_line = True
for line in args.input:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
if is_first_line:
is_first_line = False
col_names = line.split(args.delim)
# For each ordered ID figure out its index/position in the avaliable columns
col_output_order = []
for name in ordered_col_names:
try:
col_output_order.append(col_names.index(name))
except ValueError:
logging.error("column name '%s' in ordered list not found in input file!", name)
sys.exit(1)
logging.debug('col_output_order: %s', col_output_order) ## DEBUG
# Now output the column names we want in the order we want.
tmp_write = []
for i in col_output_order:
tmp_write.append(col_names[i])
args.output.write(args.delim.join(tmp_write) + '\n')
continue
# If line is not header line write columns in the order we established
line_split = line.split(args.delim)
tmp_write = []
for i in col_output_order:
try:
tmp_write.append(line_split[i])
except IndexError:
logging.error("Column index '%s' does not exist for split line: '%s'", i, line_split)
sys.exit(1)
args.output.write(args.delim.join(tmp_write) + '\n')
def load_id_list(id_file):
'''
    Loads a list of IDs (one per line) into a Python list.
'''
ids = []
for line in id_file:
line = line.strip()
if not line or line.startswith('#'):
continue
ids.append(line)
logging.debug('IDS: %s', ids) ## DEBUG
logging.debug('Number of ids loaded: %s', len(ids)) ## DEBUG
return ids
if __name__ == '__main__':
main()
|
the-stack_106_21454
|
# -*- coding: utf-8 -*-
from typing import Union, List
from zvdata import IntervalLevel
from zvdata.api import get_entities, decode_entity_id, get_data
from zvdata.domain import get_db_session
from zvt.accounts.ccxt_account import CCXTAccount
from zvt.api.common import get_kdata_schema
from zvt.domain import StockCategory, IndexMoneyFlow
from zvt.domain.stock_meta import Index
def get_indices(provider: str = 'sina',
block_category: Union[str, StockCategory] = 'concept',
return_type: str = 'df') -> object:
"""
get indices/blocks on block_category
:param provider:
:type provider:
:param block_category:
:type block_category:
:param return_type:
:type return_type:
:return:
:rtype:
"""
if type(block_category) == StockCategory:
block_category = block_category.value
session = get_db_session(provider=provider, data_schema=Index)
filters = [Index.category == block_category]
blocks = get_entities(entity_type='index', provider=provider, filters=filters,
session=session, return_type=return_type)
return blocks
get_blocks = get_indices
def in_filters(col, values):
filters = None
if values:
for value in values:
if filters:
filters |= (col == value)
else:
filters = (col == value)
return filters
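# Usage sketch for in_filters (illustrative only; 'Index.code' is just an example
# SQLAlchemy column): in_filters(Index.code, ['000001', '399001']) builds the OR
# expression (Index.code == '000001') | (Index.code == '399001'), and the function
# returns None when values is empty or None.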
def get_securities_in_blocks(provider: str = 'eastmoney',
categories: List[Union[str, StockCategory]] = ['concept', 'industry'],
names=None, codes=None, ids=None):
session = get_db_session(provider=provider, data_schema=Index)
categories = [StockCategory(category).value for category in categories]
filters = [Index.category.in_(categories)]
# add name filters
if names:
filters.append(Index.name.in_(names))
blocks = get_entities(entity_ids=ids, codes=codes, entity_type='index', provider=provider,
filters=filters, return_type='domain', session=session)
securities = []
for block in blocks:
securities += [item.stock_id for item in block.stocks]
return securities
def get_kdata(entity_id, level=IntervalLevel.LEVEL_1DAY.value, provider='eastmoney', columns=None,
return_type='df', start_timestamp=None, end_timestamp=None,
filters=None, session=None, order=None, limit=None):
entity_type, exchange, code = decode_entity_id(entity_id)
data_schema = get_kdata_schema(entity_type, level=level)
return get_data(data_schema=data_schema, entity_id=entity_id, level=level, provider=provider, columns=columns,
return_type=return_type,
start_timestamp=start_timestamp,
end_timestamp=end_timestamp, filters=filters, session=session, order=order, limit=limit)
def get_current_price(entity_ids=None, entity_type='coin'):
result = {}
if entity_type == 'coin':
if entity_ids:
for entity_id in entity_ids:
a, exchange, code = decode_entity_id(entity_id)
assert a == entity_type
ccxt_exchange = CCXTAccount.get_ccxt_exchange(exchange_str=exchange)
if not ccxt_exchange:
raise Exception('{} not support'.format(exchange))
orderbook = ccxt_exchange.fetch_order_book(code)
bid = orderbook['bids'][0][0] if len(orderbook['bids']) > 0 else None
ask = orderbook['asks'][0][0] if len(orderbook['asks']) > 0 else None
entity_id = f'coin_{exchange}_{code}'
result[entity_id] = (bid, ask)
return result
if __name__ == '__main__':
money_flow_session = get_db_session(provider='sina', data_schema=IndexMoneyFlow)
entities = get_entities(entity_type='index',
return_type='domain', provider='sina',
                            # only concept and industry categories
filters=[Index.category.in_(
[StockCategory.industry.value, StockCategory.concept.value])])
for entity in entities:
sql = 'UPDATE index_money_flow SET name="{}" where code="{}"'.format(
entity.name, entity.code)
money_flow_session.execute(sql)
money_flow_session.commit()
|
the-stack_106_21458
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_06_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> "models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
        :param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
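# A minimal usage sketch (an assumption, not shown in this file): these async
# operations are normally reached through the NetworkManagementClient for
# api-version 2020-06-01 as `network_interface_ip_configurations`; the credential,
# subscription id, resource group and NIC name below are placeholders.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def show_ip_configurations():
#         async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#             async for ip_configuration in client.network_interface_ip_configurations.list(
#                 "<resource-group>", "<network-interface>"
#             ):
#                 print(ip_configuration.name, ip_configuration.private_ip_address)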
|
the-stack_106_21459
|
from sortedcontainers import SortedDict
from .orderlist import OrderList
from .order import Order
class OrderTree(object):
    '''A price-ordered collection of OrderLists (implemented here with a SortedDict
    rather than a literal red-black tree).
    The exchange will be using the OrderTree to hold bid and ask data (one OrderTree for each side).
    Keeping the information in a sorted structure makes it easier/faster to detect a match.
    '''
def __init__(self):
self.price_map = SortedDict() # Dictionary containing price : OrderList object
self.prices = self.price_map.keys()
self.order_map = {} # Dictionary containing order_id : Order object
self.volume = 0 # Contains total quantity from all Orders in tree
self.num_orders = 0 # Contains count of Orders in tree
self.depth = 0 # Number of different prices in tree (http://en.wikipedia.org/wiki/Order_book_(trading)#Book_depth)
def __len__(self):
return len(self.order_map)
def get_price_list(self, price):
return self.price_map[price]
def get_order(self, order_id):
return self.order_map[order_id]
def create_price(self, price):
self.depth += 1 # Add a price depth level to the tree
new_list = OrderList()
self.price_map[price] = new_list
def remove_price(self, price):
self.depth -= 1 # Remove a price depth level
del self.price_map[price]
def price_exists(self, price):
return price in self.price_map
def order_exists(self, order):
return order in self.order_map
def insert_order(self, quote):
if self.order_exists(quote['order_id']):
self.remove_order_by_id(quote['order_id'])
self.num_orders += 1
if quote['price'] not in self.price_map:
self.create_price(quote['price']) # If price not in Price Map, create a node in RBtree
order = Order(quote, self.price_map[quote['price']]) # Create an order
self.price_map[order.price].append_order(order) # Add the order to the OrderList in Price Map
self.order_map[order.order_id] = order
self.volume += order.quantity
def update_order(self, order_update):
order = self.order_map[order_update['order_id']]
original_quantity = order.quantity
if order_update['price'] != order.price:
# Price changed. Remove order and update tree.
order_list = self.price_map[order.price]
order_list.remove_order(order)
if len(order_list) == 0: # If there is nothing else in the OrderList, remove the price from RBtree
self.remove_price(order.price)
self.insert_order(order_update)
else:
# Quantity changed. Price is the same.
order.update_quantity(order_update['quantity'], order_update['timestamp'])
self.volume += order.quantity - original_quantity
def remove_order_by_id(self, order_id):
self.num_orders -= 1
order = self.order_map[order_id]
self.volume -= order.quantity
order.order_list.remove_order(order)
if len(order.order_list) == 0:
self.remove_price(order.price)
del self.order_map[order_id]
def max_price(self):
if self.depth > 0:
return self.prices[-1]
else:
return None
def min_price(self):
if self.depth > 0:
return self.prices[0]
else:
return None
def max_price_list(self):
if self.depth > 0:
return self.get_price_list(self.max_price())
else:
return None
def min_price_list(self):
if self.depth > 0:
return self.get_price_list(self.min_price())
else:
return None
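# A minimal usage sketch (hedged: the exact fields Order expects come from
# order.py, which is not shown here; order_id/price/quantity/timestamp are the
# keys this class itself reads, so anything beyond them is an assumption):
#
#     bids = OrderTree()
#     bids.insert_order({'order_id': 1, 'price': 101, 'quantity': 5, 'timestamp': 1})
#     bids.insert_order({'order_id': 2, 'price': 103, 'quantity': 2, 'timestamp': 2})
#     bids.max_price()                     # 103
#     bids.update_order({'order_id': 1, 'price': 101, 'quantity': 7, 'timestamp': 3})
#     bids.remove_order_by_id(2)
#     assert bids.max_price() == 101 and bids.volume == 7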
|
the-stack_106_21462
|
from data_utils import FEATURES, label_to_classs_name, collapse_clusters
import matplotlib.pyplot as plt
import numpy as np
import scipy
import seaborn as sns
import re
sns.set()
def to_feature_name(feature):
return re.sub(r'^calls_to_(.+)$', r'\1()', feature)
def plot_class_stats(call_data, labels, ordering, ax, scale='log'):
unique_labels = tuple(sorted(frozenset(labels)))
ordered_labels = [ unique_labels[i] for i in ordering ]
fills_by_label = [
sum(d['total_fills'] for (d, dl) in zip(call_data, labels) if dl == label)
for label in ordered_labels
]
orders_by_label = [
sum(d['total_orders'] for (d, dl) in zip(call_data, labels) if dl == label)
for label in ordered_labels
]
bar_width = 1 / 3
ax.clear()
ax.bar(
[i - bar_width / 2 for i in range(len(unique_labels))],
fills_by_label,
bar_width,
align='center',
label='total fills',
color=(0.882, 0.498, 0.819),
)
ax.bar(
[i + bar_width / 2 for i in range(len(unique_labels))],
orders_by_label,
bar_width,
align='center',
label='total orders',
color=(0.262, 0.839, 0.8),
)
ax.set_yscale(scale)
ax.set_xticks(np.arange(-0.5, len(unique_labels) + 0.5, 1))
ax.tick_params(labelbottom=False, bottom=False)
ax.set_xlim(-0.5, len(unique_labels) - 1 + 0.5)
ax.legend()
def plot_method_stats(call_data, ordering, ax, scale='log'):
ordered_features = [ FEATURES[i] for i in reversed(ordering) ]
method_counts = [
sum(int(d[f] * d['total_calls']) if f in d else 0 for d in call_data)
if f.startswith('calls_to_') else 0
for f in ordered_features
]
ax.clear()
ax.barh(
list(range(len(ordered_features))),
method_counts,
0.5,
align='center',
label='total calls',
color=(0.5, 0.75, 0.5),
)
ax.set_xscale(scale)
ax.invert_xaxis()
ax.set_yticks(np.arange(-0.5, len(ordered_features) + 0.5, 1))
ax.tick_params(labelright=False, labelleft=False, right=False)
ax.set_ylim(-0.5, len(ordered_features) - 1 + 0.5)
ax.legend()
def create_label_names(call_data, labels):
unique_labels = sorted(frozenset(labels))
names = []
for label in unique_labels:
label_calls = [
d
for (d, cl)
in zip(call_data, labels)
if cl == label
]
num_unique_senders = sum(
d['unique_senders']
for d in label_calls
)
name = label_to_classs_name(label)
if num_unique_senders == 0:
name = '๐ %s' % name
name = '%s (%d)' % (name, len(label_calls))
names.append(name)
return names
def reorder(items, row_ordering=None, col_ordering=None):
if row_ordering is None:
return items
reordered = [ None ] * len(items)
for i, o in enumerate(row_ordering):
y = items[o]
reordered[i] = reorder(y, col_ordering) if col_ordering else y
return reordered
def plot_heatmap(
call_data,
labels,
draw_dendrogram=False,
linear_scale=False,
attenuate=0,
brighten=0,
title=None,
col_ordering=None,
row_ordering=None,
):
features = collapse_clusters(
call_data,
labels,
attenuate=attenuate,
brighten=brighten,
)
unique_labels = sorted(frozenset(labels))
features = reorder(features, col_ordering, row_ordering)
col_names = reorder(create_label_names(call_data, labels), col_ordering)
row_names = reorder([ to_feature_name(s) for s in FEATURES ], row_ordering)
cg = sns.clustermap(
np.array(features).transpose(),
method='ward',
yticklabels=row_names,
xticklabels=col_names,
linecolor=(1,1,1,0.25),
linewidth=0.005,
cmap='magma',
col_cluster=False if col_ordering else True,
row_cluster=False if row_ordering else True,
)
if col_ordering is None:
col_ordering = cg.dendrogram_col.reordered_ind \
if cg.dendrogram_col is not None else list(range(len(unique_labels)))
if row_ordering is None:
row_ordering = cg.dendrogram_row.reordered_ind \
if cg.dendrogram_row is not None else list(range(len(FEATURES)))
if not draw_dendrogram:
plot_class_stats(
call_data,
labels,
col_ordering,
cg.ax_col_dendrogram.axes,
scale='linear' if linear_scale else 'log',
)
plot_method_stats(
call_data,
row_ordering,
cg.ax_row_dendrogram.axes,
scale='linear' if linear_scale else 'log',
)
plt.subplots_adjust(top=0.975, bottom=0.175, left=0.025, right=0.75)
if title:
cg.fig.canvas.set_window_title(title)
return col_ordering, row_ordering
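# A minimal usage sketch (hedged: `call_data` and `labels` come from loaders in
# data_utils that are not part of this module, and `load_call_data_somehow` is a
# hypothetical placeholder; each record is a dict of FEATURES values plus totals,
# with one cluster label per record):
#
#     call_data, labels = load_call_data_somehow()
#     col_order, row_order = plot_heatmap(call_data, labels, title='All clusters')
#     # Re-use the same ordering so a second plot lines up with the first:
#     plot_heatmap(call_data, labels, linear_scale=True,
#                  col_ordering=col_order, row_ordering=row_order)
#     plt.show()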
|
the-stack_106_21463
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from c7n.mu import generate_requirements
import jinja2
@click.command()
@click.option('--package', required=True)
@click.option('--template', type=click.Path())
@click.option('--output', type=click.Path())
def main(package, template, output):
"""recursive dependency pinning for package"""
requirements = generate_requirements(package)
pinned_packages = requirements.split('\n')
    if not template or not output:
print('\n'.join(pinned_packages))
return
with open(template) as fh:
t = jinja2.Template(fh.read(), trim_blocks=True, lstrip_blocks=True)
with open(output, 'w') as fh:
fh.write(t.render(pinned_packages=pinned_packages))
if __name__ == '__main__':
main()
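# Example invocations (the script name and template path are hypothetical; both
# c7n and jinja2 must be importable):
#
#     python package-pin.py --package c7n
#     python package-pin.py --package c7n --template requirements.txt.j2 --output requirements.txt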
|
the-stack_106_21464
|
# S/E/I/R : 4 bars in each group
# each group is one week; 3 weeks
# age cat 1 to 5; 5 categories vertically in each bar
# csv format input
# each row has three keys which together identify the row uniquely (age; week; SEIR)
# some hardcoded color maps
# This product includes color
# specifications and designs developed by Cynthia Brewer
# (http://colorbrewer.org/
sequential_colors = [['#f7fcfd','#e5f5f9','#ccece6','#99d8c9','#66c2a4','#41ae76','#238b45','#006d2c','#00441b'],
['#f7fcfd','#e0ecf4','#bfd3e6','#9ebcda','#8c96c6','#8c6bb1','#88419d','#810f7c','#4d004b'],
['#fff7ec','#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#b30000','#7f0000'],
["#fff5f0","#fee0d2","#fcbba1","#fc9272","#fb6a4a","#ef3b2c","#cb181d","#a50f15","#67000d"],
["#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252","#252525","#000000"],
["#7f3b08","#b35806","#e08214","#fdb863","#fee0b6","#f7f7f7","#d8daeb","#b2abd2","#8073ac","#542788","#2d004b"],
["#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#ffffff","#e0e0e0","#bababa","#878787","#4d4d4d","#1a1a1a"],
["#a50026","#d73027","#f46d43","#fdae61","#fee090","#ffffbf","#e0f3f8","#abd9e9","#74add1","#4575b4","#313695"],
["#9e0142","#d53e4f","#f46d43","#fdae61","#fee08b","#ffffbf","#e6f598","#abdda4","#66c2a5","#3288bd","#5e4fa2"],
["#a50026","#d73027","#f46d43","#fdae61","#fee08b","#ffffbf","#d9ef8b","#a6d96a","#66bd63","#1a9850","#006837"],
["#7fc97f","#beaed4","#fdc086","#ffff99","#386cb0","#f0027f","#bf5b17","#666666"],
["#ffffe5","#fff7bc","#fee391","#fec44f","#fe9929","#ec7014","#cc4c02","#993404","#662506"],
["#f7fcf5","#e5f5e0","#c7e9c0","#a1d99b","#74c476","#41ab5d","#238b45","#006d2c","#00441b"],
]
### To-DO handle missing data: DONE, tested in the presence of missing data,
### we get zero-height or missing minor bar-lets
import matplotlib.pyplot as plt
import csv
import random
import sys
import os
"""
Arguments:
N:
Number of colors in the map
type:
Types of colormap.
Currently two values are being accepted.
(1)'sequential' case-insensitive; 'S' or 's' can also be passed
(2)'qualitative' case-insensitive; 'Q' or 'q' can also be passed
Returns:
a list of colors with N levels; seq/qual based on type
"""
def getColorMaps(N=10, type='sequential'):
N += 1 # handling the boundary case- we will fix this later. Since we run the loops till N-1
if type in ['sequential' ,'S' ,'s' , 'Sequential']:
# generating a random starting point
startPoint = (random.random(), random.random(), random.random())
# choose a random axis
r = int(random.random() // 0.3333)
color_list = []
zeros = [0,0,0]
zeros[0] = startPoint[0]
zeros[1] = startPoint[1]
zeros[2] = startPoint[2]
zeros[r] = 0 #startPoint[r]
for i in range(N-1):
zeros[r] += ((1)/N)
color_list.append(tuple(zeros))
return color_list
elif type in ['qualitative' ,'Q' ,'q' , 'Qualitative']:
# generating a random starting point
color_list = []
for i in range(N-1):
color_list.append(tuple([random.random(), random.random(), random.random()]))
return color_list
else:
print ("Invalid argument for colormap passed! \n Exiting script")
sys.exit()
"""
Arguments:
filename:
a csv filename with 4 headers, H1, H2, H3 and H4. Each one of H1/H2/H3/H4 are strings.
the first three headers(H1/H2/H3) should identify a row uniquely
the fourth header H4 contains the value (H4 must be integer or floating; cannot be a string)
.csv files without headers will result in the first row being read as headers.
duplicates (relevant for csv inputs):
duplicate entries imply two rows with same <H1/H2/H3> identifier.
In case of duplicates aggregation is performed before proceeding, both the duplicates are binned together to increase the target value
BGV:a python list of three headers in order for stacking (Bars, Groups and Vertical Stacking)
for example, if BGV=[H2, H1, H3], the group stack plot will be such that:
maximum number of bars = number of unique values under column H2
maximum number of bars grouped together horizontally(side-by-side) = number of
unique values under column H1
maximum number of vertical stacks in any bar = number of unique values under column H2
"""
def plot_grouped_stacks(filename, BGV, fig_size=(10, 8),
intra_group_spacing=0.1,
inter_group_spacing=10,
y_loc_for_group_name=-5,
y_loc_for_hstack_name=5,
fontcolor_hstacks='blue',
fontcolor_groups='black',
fontsize_hstacks=20,
fontsize_groups=30,
x_trim_hstack_label=0,
x_trim_group_label=0,
extra_space_on_top=20
):
figure_ = plt.figure(figsize=fig_size)
size = figure_.get_size_inches()
figure_.add_subplot(1,1,1)
# sanity check for inputs; some trivial exception handlings
    if intra_group_spacing >= 100:
        print ("Percentage greater than 100 for variable intra_group_spacing, Aborting! ")
        return
    else:
        intra_group_spacing = intra_group_spacing*size[0]/100 # converting percentages to inches
    if inter_group_spacing >= 100:
        print ("Percentage greater than 100 for variable inter_group_spacing, Aborting! ")
        return
    else:
        inter_group_spacing = inter_group_spacing*size[0]/100 # converting percentages to inches
    if y_loc_for_group_name >= 100:
        print ("Percentage greater than 100 for variable y_loc_for_group_name, Aborting! ")
        return
    else:
        # the multiplier 90 is set empirically to roughly align the percentage value
        # <this is a quick fix solution, which needs to be improved later>
        y_loc_for_group_name = 90*y_loc_for_group_name*size[1]/100 # converting percentages to inches
    if y_loc_for_hstack_name >= 100:
        print ("Percentage greater than 100 for variable y_loc_for_hstack_name, Aborting! ")
        return
    else:
        y_loc_for_hstack_name = 70*y_loc_for_hstack_name*size[1]/100 # converting percentages to inches
    if x_trim_hstack_label >= 100:
        print ("Percentage greater than 100 for variable x_trim_hstack_label, Aborting! ")
        return
    else:
        x_trim_hstack_label = x_trim_hstack_label*size[0]/100 # converting percentages to inches
    if x_trim_group_label >= 100:
        print ("Percentage greater than 100 for variable x_trim_group_label, Aborting! ")
        return
    else:
        x_trim_group_label = x_trim_group_label*size[0]/100 # converting percentages to inches
fileread_list = []
with open(filename) as f:
        for line_number, row in enumerate(f, start=1):
            r = row.strip().split(',')
            if len(r) != 4:
                print ('4 items not found @ line ', line_number, ' of ', filename)
return
else:
fileread_list.append(r)
# inputs:
bar_variable = BGV[0]
group_variable = BGV[1]
vertical_stacking_variable = BGV[2]
first_line = fileread_list[0]
for i in range(4):
if first_line[i] == vertical_stacking_variable:
header_num_Of_vertical_stacking = i
break
sorted_order_for_stacking = []
for listed in fileread_list[1:]: # skipping the first line
sorted_order_for_stacking.append(listed[header_num_Of_vertical_stacking])
sorted_order_for_stacking = list(set(sorted_order_for_stacking))
list.sort(sorted_order_for_stacking)
sorted_order_for_stacking_V = list(sorted_order_for_stacking)
#####################
first_line = fileread_list[0]
for i in range(4):
if first_line[i] == bar_variable:
header_num_Of_bar_Variable = i
break
sorted_order_for_stacking = []
for listed in fileread_list[1:]: # skipping the first line
sorted_order_for_stacking.append(listed[header_num_Of_bar_Variable])
sorted_order_for_stacking = list(set(sorted_order_for_stacking))
list.sort(sorted_order_for_stacking)
sorted_order_for_stacking_H = list(sorted_order_for_stacking)
######################
first_line = fileread_list[0]
for i in range(4):
if first_line[i] == group_variable:
header_num_Of_bar_Variable = i
break
sorted_order_for_stacking = []
for listed in fileread_list[1:]: # skipping the first line
sorted_order_for_stacking.append(listed[header_num_Of_bar_Variable])
sorted_order_for_stacking = list(set(sorted_order_for_stacking))
list.sort(sorted_order_for_stacking)
sorted_order_for_stacking_G = list(sorted_order_for_stacking)
#########################
print (" Vertical/Horizontal/Groups ")
print (sorted_order_for_stacking_V, " : Vertical stacking labels")
print (sorted_order_for_stacking_H, " : Horizontal stacking labels")
print (sorted_order_for_stacking_G, " : Group names")
# +1 because we need one space before and after as well
each_group_width = (size[0] - (len(sorted_order_for_stacking_G) + 1) *
inter_group_spacing)/len(sorted_order_for_stacking_G)
# -1 because we need n-1 spaces between bars if there are n bars in each group
each_bar_width = (each_group_width - (len(sorted_order_for_stacking_H) - 1) *
intra_group_spacing)/len(sorted_order_for_stacking_H)
# colormaps
number_of_color_maps_needed = len(sorted_order_for_stacking_H)
number_of_levels_in_each_map = len(sorted_order_for_stacking_V)
c_map_vertical = {}
for i in range(number_of_color_maps_needed):
try:
c_map_vertical[sorted_order_for_stacking_H[i]] = sequential_colors[i]
except:
print ("Something went wrong with hardcoded colors!\n reverting to custom colors (linear in RGB) ")
c_map_vertical[sorted_order_for_stacking_H[i]] = getColorMaps(N = number_of_levels_in_each_map, type = 'S')
##
state_num = -1
max_bar_height = 0
for state in sorted_order_for_stacking_H:
state_num += 1
week_num = -1
for week in ['Week 1', 'Week 2','Week 3']:
week_num += 1
a = [0] * len(sorted_order_for_stacking_V)
for i in range(len(sorted_order_for_stacking_V)):
for line_num in range(1,len(fileread_list)): # skipping the first line
listed = fileread_list[line_num]
if listed[1] == state and listed[0] == week and listed[2] == sorted_order_for_stacking_V[i]:
a[i] = (float(listed[3]))
# get cumulative values
cum_val = [a[0]]
for j in range(1,len(a)):
cum_val.append( cum_val[j-1] + a[j] )
max_bar_height = max([max_bar_height, max(cum_val)])
plt.text(x= (week_num)*(each_group_width+inter_group_spacing) - x_trim_group_label
, y=y_loc_for_group_name, s=sorted_order_for_stacking_G[week_num], fontsize=fontsize_groups, color=fontcolor_groups)
# state labels need to be printed just once for each week, hence putting them outside the loop
plt.text(x= week_num*(each_group_width+inter_group_spacing) + (state_num)*(each_bar_width+intra_group_spacing) - x_trim_hstack_label
, y=y_loc_for_hstack_name, s=sorted_order_for_stacking_H[state_num], fontsize=fontsize_hstacks, color = fontcolor_hstacks)
if week_num == 1:
# label only in the first week
for i in range(len(sorted_order_for_stacking_V)-1,-1,-1):
# trick to make them all visible: Plot in descending order of their height!! :)
plt.bar( week_num*(each_group_width+inter_group_spacing) +
state_num*(each_bar_width+intra_group_spacing),
height=cum_val[i] ,
width=each_bar_width,
color=c_map_vertical[state][i],
label= state + "_" + sorted_order_for_stacking_V[i] )
else:
# no label after the first week, (as it is just repetition)
for i in range(len(sorted_order_for_stacking_V)-1,-1,-1):
plt.bar( week_num*(each_group_width+inter_group_spacing) +
state_num*(each_bar_width+intra_group_spacing),
height=cum_val[i] ,
width=each_bar_width,
color=c_map_vertical[state][i])
plt.ylim(0,max_bar_height*(1+extra_space_on_top/100))
plt.tight_layout()
plt.xticks([], [])
plt.legend(ncol=len(sorted_order_for_stacking_H))
return figure_
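# A minimal usage sketch (hedged: 'seir.csv' and its header names are hypothetical;
# the CSV must follow the 4-column format described in the docstring above, and the
# group loop is currently hard-coded to the values 'Week 1'..'Week 3', so the group
# column must use exactly those labels):
#
#     fig = plot_grouped_stacks('seir.csv', BGV=['State', 'Week', 'AgeGroup'],
#                               inter_group_spacing=8)
#     plt.show()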
|
the-stack_106_21466
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
from cirq.contrib.paulistring import ConvertToPauliStringPhasors
def test_convert():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
cirq.X(q0),
cirq.Y(q1) ** 0.25,
cirq.Z(q0) ** 0.125,
cirq.H(q1),
)
c_orig = cirq.Circuit(circuit)
ConvertToPauliStringPhasors().optimize_circuit(circuit)
cirq.testing.assert_allclose_up_to_global_phase(circuit.unitary(), c_orig.unitary(), atol=1e-7)
cirq.testing.assert_has_diagram(
circuit,
"""
0: โโโ[X]โโโโโโโโ[Z]^(1/8)โโโโโโโโโ
1: โโโ[Y]^0.25โโโ[Y]^-0.5โโโโ[Z]โโโ
""",
)
def test_convert_keep_clifford():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
cirq.X(q0),
cirq.Y(q1) ** 0.25,
cirq.Z(q0) ** 0.125,
cirq.SingleQubitCliffordGate.H(q1),
)
c_orig = cirq.Circuit(circuit)
ConvertToPauliStringPhasors(keep_clifford=True).optimize_circuit(circuit)
cirq.testing.assert_allclose_up_to_global_phase(circuit.unitary(), c_orig.unitary(), atol=1e-7)
cirq.testing.assert_has_diagram(
circuit,
"""
0: โโโXโโโโโโโโโโ[Z]^(1/8)โโโ
1: โโโ[Y]^0.25โโโHโโโโโโโโโโโ
""",
)
def test_already_converted():
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.PauliStringPhasor(cirq.X.on(q0)))
c_orig = cirq.Circuit(circuit)
ConvertToPauliStringPhasors().optimize_circuit(circuit)
assert circuit == c_orig
def test_ignore_unsupported_gate():
class UnsupportedDummy(cirq.testing.TwoQubitGate):
pass
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
UnsupportedDummy()(q0, q1),
)
c_orig = cirq.Circuit(circuit)
ConvertToPauliStringPhasors(ignore_failures=True).optimize_circuit(circuit)
assert circuit == c_orig
def test_fail_unsupported_gate():
class UnsupportedDummy(cirq.testing.TwoQubitGate):
pass
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
UnsupportedDummy()(q0, q1),
)
with pytest.raises(TypeError):
ConvertToPauliStringPhasors().optimize_circuit(circuit)
|
the-stack_106_21468
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import locale
import platform
import traceback
import os
import sys
import subprocess
import requests
from PyQt5.QtCore import QObject
import PyQt5.QtCore as QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import *
from electrum_ltc.i18n import _
from electrum_ltc import ELECTRUM_VERSION, bitcoin, constants
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
report_server = "https://crashhub.electrum-ltc.org/crash"
class Exception_Window(QWidget):
_active_window = None
def __init__(self, main_window, exctype, value, tb):
self.exc_args = (exctype, value, tb)
self.main_window = main_window
QWidget.__init__(self)
        self.setWindowTitle('Electrum-LTC - ' + _('An Error Occurred'))
self.setMinimumSize(600, 300)
main_box = QVBoxLayout()
heading = QLabel('<h2>' + _('Sorry!') + '</h2>')
main_box.addWidget(heading)
main_box.addWidget(QLabel(_('Something went wrong while executing Electrum.')))
main_box.addWidget(QLabel(
_('To help us diagnose and fix the problem, you can send us a bug report that contains useful debug '
'information:')))
collapse_info = QPushButton(_("Show report contents"))
collapse_info.clicked.connect(lambda: QMessageBox.about(self, "Report contents", self.get_report_string()))
main_box.addWidget(collapse_info)
main_box.addWidget(QLabel(_("Please briefly describe what led to the error (optional):")))
self.description_textfield = QTextEdit()
self.description_textfield.setFixedHeight(50)
main_box.addWidget(self.description_textfield)
main_box.addWidget(QLabel(_("Do you want to send this report?")))
buttons = QHBoxLayout()
report_button = QPushButton(_('Send Bug Report'))
report_button.clicked.connect(self.send_report)
report_button.setIcon(QIcon(":icons/tab_send.png"))
buttons.addWidget(report_button)
never_button = QPushButton(_('Never'))
never_button.clicked.connect(self.show_never)
buttons.addWidget(never_button)
close_button = QPushButton(_('Not Now'))
close_button.clicked.connect(self.close)
buttons.addWidget(close_button)
main_box.addLayout(buttons)
self.setLayout(main_box)
self.show()
def send_report(self):
if constants.net.GENESIS[-4:] not in ["29a0", "bfe2"] and ".electrum-ltc.org" in report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
self.main_window.show_critical(_("Please report this issue manually."))
return
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
try:
response = requests.post(report_server, data=report, timeout=20)
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.main_window.show_critical(_('There was a problem with the automatic reporting:') + '\n' +
str(e) + '\n' +
_("Please report this issue manually."))
return
else:
QMessageBox.about(self, "Crash report", response.text)
self.close()
def on_close(self):
Exception_Window._active_window = None
sys.__excepthook__(*self.exc_args)
self.close()
def show_never(self):
self.main_window.config.set_key("show_crash_reporter", False)
self.close()
def closeEvent(self, event):
self.on_close()
event.accept()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = "".join(traceback.format_list(stack))
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": ELECTRUM_VERSION,
"os": platform.platform(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0],
"description": self.description_textfield.toPlainText()
}
try:
args["wallet_type"] = self.main_window.wallet.wallet_type
except:
# Maybe the wallet isn't loaded yet
pass
try:
args["app_version"] = self.get_git_version()
except:
# This is probably not running from source
pass
return args
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = "".join(traceback.format_exception(*self.exc_args))
return issue_template.format(**info)
@staticmethod
def get_git_version():
dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version = subprocess.check_output(['git', 'describe', '--always'], cwd=dir)
return str(version, "utf8").strip()
def _show_window(*args):
if not Exception_Window._active_window:
Exception_Window._active_window = Exception_Window(*args)
class Exception_Hook(QObject):
_report_exception = QtCore.pyqtSignal(object, object, object, object)
def __init__(self, main_window, *args, **kwargs):
super(Exception_Hook, self).__init__(*args, **kwargs)
if not main_window.config.get("show_crash_reporter", default=True):
return
self.main_window = main_window
sys.excepthook = self.handler
self._report_exception.connect(_show_window)
def handler(self, *args):
self._report_exception.emit(self.main_window, *args)
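# A minimal usage sketch (hedged: `window` stands for the wallet main window that
# the rest of the GUI constructs elsewhere; instantiating the hook is enough, since
# it installs itself as sys.excepthook):
#
#     hook = Exception_Hook(window)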
|
the-stack_106_21473
|
from PyQt5.QtWidgets import QWidget, QGridLayout, QLabel, QTreeView, QDialog, QLineEdit
from PyQt5.QtCore import QSortFilterProxyModel
from PyQt5.QtCore import pyqtSignal, Qt
from AlertsParameters.Categories.CategoryTreeItem import CategoryTreeItem
from TreeModel import TreeModel
from Alerts.Alert import Alert
class CategoriesFilterProxy(QSortFilterProxyModel):
def __init__(self):
super().__init__()
self.textFilter = ""
def setCategoriesFilter(self, text):
self.textFilter = text
def filterAcceptsRow(self, row, parent):
index = self.sourceModel().index(row, 0, parent)
item = index.internalPointer()
def filterInChildren(self, item):
ret = self.filterRegExp().indexIn(item.category.name) != -1
if ret:
return True
for child in item.children:
ret = filterInChildren(self, child)
if ret:
return True
return False
return filterInChildren(self, item)
class CategoriesWidget(QWidget):
def __init__(self, categoriesTreeItems):
super().__init__()
self.setLayout(QGridLayout())
self.categoriesTreeItems = categoriesTreeItems
self.categoriesView = QTreeView()
self.categoriesView.header().hide()
self.categoriesModel = TreeModel(self.categoriesTreeItems)
self.categoriesProxyModel = CategoriesFilterProxy()
self.categoriesProxyModel.setSourceModel(self.categoriesModel)
self.categoriesView.setModel(self.categoriesProxyModel)
self.filterCategoriesEdit = QLineEdit()
self.filterCategoriesEdit.setPlaceholderText("Filter categories")
self.categoriesProxyModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.filterCategoriesEdit.textEdited.connect(self.categoriesProxyModel.setFilterRegExp)
self.layout().addWidget(self.filterCategoriesEdit, 0, 1, 1, 1)
self.layout().addWidget(self.categoriesView, 1, 1, 1, 1)
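# A minimal usage sketch (hedged: building the CategoryTreeItem hierarchy depends
# on modules not shown here, so `my_tree_items` is a placeholder; any Qt widget
# needs a running QApplication):
#
#     import sys
#     from PyQt5.QtWidgets import QApplication
#
#     app = QApplication(sys.argv)
#     widget = CategoriesWidget(categoriesTreeItems=my_tree_items)
#     widget.show()
#     sys.exit(app.exec_())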
|
the-stack_106_21474
|
"""Support for RFXtrx devices."""
import asyncio
import binascii
from collections import OrderedDict
import logging
import RFXtrx as rfxtrxmod
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_DEVICE_ID,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
POWER_WATT,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
UV_INDEX,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_EVENT,
DEVICE_PACKET_TYPE_LIGHTING4,
EVENT_RFXTRX_EVENT,
SERVICE_SEND,
)
DOMAIN = "rfxtrx"
DEFAULT_SIGNAL_REPETITIONS = 1
CONF_FIRE_EVENT = "fire_event"
CONF_DATA_BITS = "data_bits"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_DEBUG = "debug"
CONF_OFF_DELAY = "off_delay"
SIGNAL_EVENT = f"{DOMAIN}_event"
DATA_TYPES = OrderedDict(
[
("Temperature", TEMP_CELSIUS),
("Temperature2", TEMP_CELSIUS),
("Humidity", UNIT_PERCENTAGE),
("Barometer", ""),
("Wind direction", ""),
("Rain rate", ""),
("Energy usage", POWER_WATT),
("Total usage", POWER_WATT),
("Sound", ""),
("Sensor Status", ""),
("Counter value", ""),
("UV", UV_INDEX),
("Humidity status", ""),
("Forecast", ""),
("Forecast numeric", ""),
("Rain total", ""),
("Wind average speed", ""),
("Wind gust", ""),
("Chill", ""),
("Total usage", ""),
("Count", ""),
("Current Ch. 1", ""),
("Current Ch. 2", ""),
("Current Ch. 3", ""),
("Energy usage", ""),
("Voltage", ""),
("Current", ""),
("Battery numeric", UNIT_PERCENTAGE),
("Rssi numeric", "dBm"),
]
)
_LOGGER = logging.getLogger(__name__)
DATA_RFXOBJECT = "rfxobject"
DATA_LISTENER = "ha_stop"
def _bytearray_string(data):
val = cv.string(data)
try:
return bytearray.fromhex(val)
except ValueError:
raise vol.Invalid("Data must be a hex string with multiple of two characters")
def _ensure_device(value):
if value is None:
return DEVICE_DATA_SCHEMA({})
return DEVICE_DATA_SCHEMA(value)
SERVICE_SEND_SCHEMA = vol.Schema({ATTR_EVENT: _bytearray_string})
DEVICE_DATA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_OFF_DELAY): vol.All(
cv.time_period, cv.positive_timedelta, lambda value: value.total_seconds()
),
vol.Optional(CONF_DATA_BITS): cv.positive_int,
vol.Optional(CONF_COMMAND_ON): cv.byte,
vol.Optional(CONF_COMMAND_OFF): cv.byte,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=1): cv.positive_int,
}
)
BASE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEBUG, default=False): cv.boolean,
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {cv.string: _ensure_device},
}
)
DEVICE_SCHEMA = BASE_SCHEMA.extend({vol.Required(CONF_DEVICE): cv.string})
PORT_SCHEMA = BASE_SCHEMA.extend(
{vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_HOST): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Any(DEVICE_SCHEMA, PORT_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
DOMAINS = ["switch", "sensor", "light", "binary_sensor", "cover"]
async def async_setup(hass, config):
"""Set up the RFXtrx component."""
if DOMAIN not in config:
return True
data = {
CONF_HOST: config[DOMAIN].get(CONF_HOST),
CONF_PORT: config[DOMAIN].get(CONF_PORT),
CONF_DEVICE: config[DOMAIN].get(CONF_DEVICE),
CONF_DEBUG: config[DOMAIN].get(CONF_DEBUG),
CONF_AUTOMATIC_ADD: config[DOMAIN].get(CONF_AUTOMATIC_ADD),
CONF_DEVICES: config[DOMAIN][CONF_DEVICES],
}
# Read device_id from the event code add to the data that will end up in the ConfigEntry
for event_code, event_config in data[CONF_DEVICES].items():
event = get_rfx_object(event_code)
device_id = get_device_id(
event.device, data_bits=event_config.get(CONF_DATA_BITS)
)
event_config[CONF_DEVICE_ID] = device_id
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=data,
)
)
return True
async def async_setup_entry(hass, entry: config_entries.ConfigEntry):
"""Set up the RFXtrx component."""
hass.data.setdefault(DOMAIN, {})
await async_setup_internal(hass, entry)
for domain in DOMAINS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, domain)
)
return True
async def async_unload_entry(hass, entry: config_entries.ConfigEntry):
"""Unload RFXtrx component."""
if not all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in DOMAINS
]
)
):
return False
hass.services.async_remove(DOMAIN, SERVICE_SEND)
listener = hass.data[DOMAIN][DATA_LISTENER]
listener()
rfx_object = hass.data[DOMAIN][DATA_RFXOBJECT]
await hass.async_add_executor_job(rfx_object.close_connection)
return True
def _create_rfx(config):
"""Construct a rfx object based on config."""
if config[CONF_PORT] is not None:
# If port is set then we create a TCP connection
rfx = rfxtrxmod.Connect(
(config[CONF_HOST], config[CONF_PORT]),
None,
debug=config[CONF_DEBUG],
transport_protocol=rfxtrxmod.PyNetworkTransport,
)
else:
rfx = rfxtrxmod.Connect(config[CONF_DEVICE], None, debug=config[CONF_DEBUG])
return rfx
def _get_device_lookup(devices):
"""Get a lookup structure for devices."""
lookup = dict()
for event_code, event_config in devices.items():
event = get_rfx_object(event_code)
device_id = get_device_id(
event.device, data_bits=event_config.get(CONF_DATA_BITS)
)
lookup[device_id] = event_config
return lookup
async def async_setup_internal(hass, entry: config_entries.ConfigEntry):
"""Set up the RFXtrx component."""
config = entry.data
# Initialize library
rfx_object = await hass.async_add_executor_job(_create_rfx, config)
# Setup some per device config
devices = _get_device_lookup(config[CONF_DEVICES])
# Declare the Handle event
@callback
def async_handle_receive(event):
"""Handle received messages from RFXtrx gateway."""
# Log RFXCOM event
if not event.device.id_string:
return
event_data = {
"packet_type": event.device.packettype,
"sub_type": event.device.subtype,
"type_string": event.device.type_string,
"id_string": event.device.id_string,
"data": binascii.hexlify(event.data).decode("ASCII"),
"values": getattr(event, "values", None),
}
_LOGGER.debug("Receive RFXCOM event: %s", event_data)
data_bits = get_device_data_bits(event.device, devices)
device_id = get_device_id(event.device, data_bits=data_bits)
# Register new devices
if config[CONF_AUTOMATIC_ADD] and device_id not in devices:
_add_device(event, device_id)
# Callback to HA registered components.
hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_EVENT, event, device_id)
# Signal event to any other listeners
fire_event = devices.get(device_id, {}).get(CONF_FIRE_EVENT)
if fire_event:
hass.bus.async_fire(EVENT_RFXTRX_EVENT, event_data)
@callback
def _add_device(event, device_id):
"""Add a device to config entry."""
config = DEVICE_DATA_SCHEMA({})
config[CONF_DEVICE_ID] = device_id
data = entry.data.copy()
event_code = binascii.hexlify(event.data).decode("ASCII")
data[CONF_DEVICES][event_code] = config
hass.config_entries.async_update_entry(entry=entry, data=data)
devices[device_id] = config
def _shutdown_rfxtrx(event):
"""Close connection with RFXtrx."""
rfx_object.close_connection()
listener = hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx)
hass.data[DOMAIN][DATA_LISTENER] = listener
hass.data[DOMAIN][DATA_RFXOBJECT] = rfx_object
rfx_object.event_callback = lambda event: hass.add_job(async_handle_receive, event)
def send(call):
event = call.data[ATTR_EVENT]
rfx_object.transport.send(event)
hass.services.async_register(DOMAIN, SERVICE_SEND, send, schema=SERVICE_SEND_SCHEMA)
def get_rfx_object(packetid):
"""Return the RFXObject with the packetid."""
try:
binarypacket = bytearray.fromhex(packetid)
except ValueError:
return None
pkt = rfxtrxmod.lowlevel.parse(binarypacket)
if pkt is None:
return None
if isinstance(pkt, rfxtrxmod.lowlevel.SensorPacket):
obj = rfxtrxmod.SensorEvent(pkt)
elif isinstance(pkt, rfxtrxmod.lowlevel.Status):
obj = rfxtrxmod.StatusEvent(pkt)
else:
obj = rfxtrxmod.ControlEvent(pkt)
obj.data = binarypacket
return obj
def get_pt2262_deviceid(device_id, nb_data_bits):
"""Extract and return the address bits from a Lighting4/PT2262 packet."""
if nb_data_bits is None:
return
try:
data = bytearray.fromhex(device_id)
except ValueError:
return None
mask = 0xFF & ~((1 << nb_data_bits) - 1)
data[len(data) - 1] &= mask
return binascii.hexlify(data)
def get_pt2262_cmd(device_id, data_bits):
"""Extract and return the data bits from a Lighting4/PT2262 packet."""
try:
data = bytearray.fromhex(device_id)
except ValueError:
return None
mask = 0xFF & ((1 << data_bits) - 1)
return hex(data[-1] & mask)
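# Worked example for the two PT2262 helpers above (the packet id '226d8e' is just a
# placeholder to show the bit arithmetic): with 4 data bits the last byte 0x8e is
# split into an address part and a command part.
#
#     get_pt2262_deviceid('226d8e', 4)   # -> b'226d80' (low 4 bits masked away)
#     get_pt2262_cmd('226d8e', 4)        # -> '0xe'     (only the low 4 bits kept)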
def get_device_data_bits(device, devices):
"""Deduce data bits for device based on a cache of device bits."""
data_bits = None
if device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
for device_id, entity_config in devices.items():
bits = entity_config.get(CONF_DATA_BITS)
if get_device_id(device, bits) == device_id:
data_bits = bits
break
return data_bits
def find_possible_pt2262_device(device_ids, device_id):
"""Look for the device which id matches the given device_id parameter."""
for dev_id in device_ids:
if len(dev_id) == len(device_id):
size = None
for i, (char1, char2) in enumerate(zip(dev_id, device_id)):
if char1 != char2:
break
size = i
if size is not None:
size = len(dev_id) - size - 1
_LOGGER.info(
"rfxtrx: found possible device %s for %s "
"with the following configuration:\n"
"data_bits=%d\n"
"command_on=0x%s\n"
"command_off=0x%s\n",
device_id,
dev_id,
size * 4,
dev_id[-size:],
device_id[-size:],
)
return dev_id
return None
def get_device_id(device, data_bits=None):
"""Calculate a device id for device."""
id_string = device.id_string
if data_bits and device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
masked_id = get_pt2262_deviceid(id_string, data_bits)
if masked_id:
id_string = masked_id.decode("ASCII")
return (f"{device.packettype:x}", f"{device.subtype:x}", id_string)
class RfxtrxEntity(RestoreEntity):
"""Represents a Rfxtrx device.
Contains the common logic for Rfxtrx lights and switches.
"""
def __init__(self, device, device_id, event=None):
"""Initialize the device."""
self._name = f"{device.type_string} {device.id_string}"
self._device = device
self._event = event
self._device_id = device_id
self._unique_id = "_".join(x for x in self._device_id)
async def async_added_to_hass(self):
"""Restore RFXtrx device state (ON/OFF)."""
if self._event:
self._apply_event(self._event)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_EVENT, self._handle_event
)
)
@property
def should_poll(self):
"""No polling needed for a RFXtrx switch."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if not self._event:
return None
return {ATTR_EVENT: "".join(f"{x:02x}" for x in self._event.data)}
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def unique_id(self):
"""Return unique identifier of remote device."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, *self._device_id)},
"name": f"{self._device.type_string} {self._device.id_string}",
"model": self._device.type_string,
}
def _apply_event(self, event):
"""Apply a received event."""
self._event = event
@callback
def _handle_event(self, event, device_id):
"""Handle a reception of data, overridden by other classes."""
class RfxtrxCommandEntity(RfxtrxEntity):
"""Represents a Rfxtrx device.
Contains the common logic for Rfxtrx lights and switches.
"""
def __init__(self, device, device_id, signal_repetitions=1, event=None):
"""Initialzie a switch or light device."""
super().__init__(device, device_id, event=event)
self.signal_repetitions = signal_repetitions
self._state = None
async def _async_send(self, fun, *args):
rfx_object = self.hass.data[DOMAIN][DATA_RFXOBJECT]
for _ in range(self.signal_repetitions):
await self.hass.async_add_executor_job(fun, rfx_object.transport, *args)
|
the-stack_106_21475
|
def menu():
option = str(input("Choose a option, use the numbers\n1) Register a client\n2) See the client list\n3) Exit\n"))
while option != "1" and option != "2" and option != "3":
option = str(input("Choose a option, use ONLY this numbers\n1) Register a client\n2) See the client list\n3) Exit\n"))
if option == "1":
registration()
elif option == "2":
reg = open("customer.txt", "r")
print(reg.read())
        reg.close()
menu()
def registration ():
    # Open the file inside the function: a default argument would be evaluated
    # only once, at definition time, and keep a single shared handle open.
    reg = open("customer.txt", "a")
    name = str(input("Write the client's name\n"))
    name = name.title()
    age = str(input("Write the client's age\n"))
    phone = str(input("Write the client's phone number\n"))
    txt = f"NAME = {name}, AGE = {age}, PHONE NUMBER = {phone}\n"
    reg.write(txt)
    reg.close()
question()
def question():
option = str(input("Do you want to register another customer?\n1) Yes\n2) No\n"))
while option != "1" and option != "2":
option = str(input("Choose a option, use ONLY this numbers\n1) Yes\n2) No\n"))
if option == "1":
registration()
elif option == "2":
print("The program will restart to save the list")
|
the-stack_106_21476
|
#! /usr/bin/env python3
import pathlib
import typing
import sys
"""
List all of the machines available under the listed sub-layers of meta-arm.
"""
def list_machines(layers: typing.Sequence[str]) -> typing.Set[str]:
machines = set()
# We know we're in meta-arm/scripts, so find the top-level directory
metaarm = pathlib.Path(__file__).resolve().parent.parent
if metaarm.name != "meta-arm":
raise Exception("Not running inside meta-arm")
for layer in layers:
machines |= set(p.stem for p in (metaarm / layer / "conf" / "machine").glob("*.conf"))
return machines
if __name__ == "__main__":
if len(sys.argv) > 1:
machines = list_machines(sys.argv[1:])
print(" ".join(sorted(machines)))
sys.exit(0)
else:
print("Usage:\n$ %s [layer name ...] " % sys.argv[0])
sys.exit(1)
|
the-stack_106_21477
|
"""
The file name should contain the representative class/struct name.
If the file contains class/struct decls or defs, the file name should be
one of the class names.
If the class/struct name starts with "C", the "C" can be omitted in the file name.
== Vilolation ==
= a.h = <== Violation. It should contain class name 'TestClass'
class TestClass() {
}
= a.cpp = <== Violation. It should contain class name 'Test'
void Test::Method1() {
}
== Good ==
= TestClass.h = <== OK
class TestClass {
}
= Class1.h = <== OK.
class CClass1 {
}
= TestClass.cpp = <== OK
void TestClass::Method1() {
}
"""
from nsiqcppstyle_rulemanager import *
from nsiqcppstyle_reporter import *
try :
set()
except NameError:
from sets import Set as set
classname = None
def RunFunctionNameRule(lexer, fullName, decl, contextStack, context) :
names = fullName.split("::")
if len(names) > 1 :
if len(names[0]) != 0 :
classname.add(names[0])
def RunTypeNameRule(lexer, currentType, fullName, decl, contextStack, context) :
if currentType in ["CLASS", "STRUCT"] :
names = fullName.split("::")
if len(names[-1]) != 0 :
classname.add(names[-1])
def RunFileStartRule(lexer, filename, dirname) :
global classname
classname = set()
def RunFileEndRule(lexer, filename, dirname):
goodFileName = False
filename = filename.lower( )
if len(classname) == 0 : return
for t in classname :
if t.startswith("C") :
t = t[1:]
if filename.find(t.lower()) != -1 :
goodFileName = True
break
if not goodFileName :
nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, "", 0, 0), __name__,
"The filename does not represent the classnames (%s)" %(classname))
ruleManager.AddFileStartRule(RunFileStartRule)
ruleManager.AddTypeNameRule(RunTypeNameRule)
ruleManager.AddFunctionNameRule(RunFunctionNameRule)
ruleManager.AddFileEndRule(RunFileEndRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFileStartRule(RunFileStartRule)
ruleManager.AddTypeNameRule(RunTypeNameRule)
ruleManager.AddFunctionNameRule(RunFunctionNameRule)
ruleManager.AddFileEndRule(RunFileEndRule)
def test1(self):
self.Analyze("test/aa.c",
"""
void AA::DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/ab.c",
"""
void AA::DSD() {
}
""")
assert CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/aa.c",
"""
void CAA::DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test4(self):
self.Analyze("test/aa.c",
"""
void DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("test/aa.cpp",
"""
struct AA {
}
class BB {
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("test/aa.cpp",
"""
struct AA1 {
}
class BB {
}
""")
assert CheckErrorContent(__name__)
def test7(self):
self.Analyze("test/CamRecorderFactory.cpp",
"""
class __declspec(dllexport) CCamRecorderFactory
{
};
""")
assert not CheckErrorContent(__name__)
def test8(self):
self.Analyze("test/CamRecorderFactory.cpp",
"""
class DLLEXPORT CCamRecorderFactory
{
};
""")
assert not CheckErrorContent(__name__)
|
the-stack_106_21478
|
"""
predict trades for one specific day
"""
import functools
import logging
from datetime import datetime, timedelta
import pandas as pd
import tensorflow as tf
from absl import app
from absl import logging as absl_logging
from tf_agents.system import system_multiprocessing as multiprocessing
from config import settings
from envirement.trading_py_env import TradingPyEnv
from models.model_ppo import TradeDRLAgent
from preprocess_data import preprocess_data
def main(_):
PREDICT_DAY = "2020-06-01"
days_to_subtract = 60
ticker_list = settings.DOW_30_TICKER
data_columns = settings.DATA_COLUMNS
# Preprocess data
df_trade = preprocess_data.preprocess_data(
tic_list=ticker_list,
start_date=str(datetime.strptime(PREDICT_DAY, '%Y-%m-%d')
- timedelta(days=days_to_subtract)),
end_date=PREDICT_DAY,
field_mappings=settings.CSV_FIELD_MAPPINGS,
baseline_filed_mappings=settings.BASELINE_FIELD_MAPPINGS,
csv_file_info=settings.CSV_FILE_SETTINGS,
user_columns=settings.USER_DEFINED_FEATURES
)
information_cols = []
unavailable_cols = []
for col in data_columns:
if col in df_trade.columns:
information_cols.append(col)
else:
unavailable_cols.append(col)
if not information_cols:
logging.error('No column to train')
raise ValueError
else:
        logging.info(f'Columns used to train:\n{information_cols}')
if unavailable_cols:
        logging.info(f'Unavailable columns:\n{unavailable_cols}')
# df_trade[information_cols].to_csv("temp.csv", index=1, encoding="utf-8")
logging.info(f'TensorFlow v{tf.version.VERSION}')
logging.info(
f"Available [GPU] devices:\n{tf.config.list_physical_devices('GPU')}")
# Predict
test_py_env = TradingPyEnv(
df=df_trade,
daily_information_cols=information_cols,
)
model = TradeDRLAgent()
_, df_actions = model.test_trade(env=test_py_env)
assert len(df_trade.tic.unique()) == len(
df_actions.tail(1).transactions.values[0])
pred_inf_df = pd.DataFrame(
{'ticker': df_trade.tic.unique()}
)
pred_inf_df['trade'] = pd.Series(df_actions.tail(1).transactions.values[0])
last_day = pd.to_datetime(str(df_actions.tail(1).date.values[0]))
last_day_str = last_day.strftime("%B %d, %Y")
logging.info(f'\nPredicted trades for {last_day_str}:\n{pred_inf_df}')
if __name__ == '__main__':
# FMT = '[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s'
FMT = '[%(levelname)s] %(message)s'
formatter = logging.Formatter(FMT)
absl_logging.get_absl_handler().setFormatter(formatter)
absl_logging.set_verbosity('info')
# logging.basicConfig(format='%(message)s', level=logging.INFO)
multiprocessing.handle_main(functools.partial(app.run, main))
|
the-stack_106_21482
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.irc}.
"""
import time
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.words.protocols import irc
from twisted.words.protocols.irc import IRCClient
from twisted.internet import protocol
from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing
class ModeParsingTests(unittest.TestCase):
"""
Tests for L{twisted.words.protocols.irc.parseModes}.
"""
paramModes = ('klb', 'b')
def test_emptyModes(self):
"""
Parsing an empty mode string raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '', [])
def test_emptyModeSequence(self):
"""
Parsing a mode string that contains an empty sequence (either a C{+} or
C{-} followed directly by another C{+} or C{-}, or not followed by
anything at all) raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '++k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-+k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_malformedModes(self):
"""
Parsing a mode string that does not start with C{+} or C{-} raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, 'foo', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '%', [])
def test_nullModes(self):
"""
Parsing a mode string that contains no mode characters raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_singleMode(self):
"""
Parsing a single mode setting with no parameters results in that mode,
with no parameters, in the "added" direction and no modes in the
"removed" direction.
"""
added, removed = irc.parseModes('+s', [])
self.assertEquals(added, [('s', None)])
self.assertEquals(removed, [])
added, removed = irc.parseModes('-s', [])
self.assertEquals(added, [])
self.assertEquals(removed, [('s', None)])
def test_singleDirection(self):
"""
Parsing a single-direction mode setting with multiple modes and no
parameters, results in all modes falling into the same direction group.
"""
added, removed = irc.parseModes('+stn', [])
self.assertEquals(added, [('s', None),
('t', None),
('n', None)])
self.assertEquals(removed, [])
added, removed = irc.parseModes('-nt', [])
self.assertEquals(added, [])
self.assertEquals(removed, [('n', None),
('t', None)])
def test_multiDirection(self):
"""
Parsing a multi-direction mode setting with no parameters.
"""
added, removed = irc.parseModes('+s-n+ti', [])
self.assertEquals(added, [('s', None),
('t', None),
('i', None)])
self.assertEquals(removed, [('n', None)])
def test_consecutiveDirection(self):
"""
Parsing a multi-direction mode setting containing two consecutive mode
sequences with the same direction results in the same result as if
there were only one mode sequence in the same direction.
"""
added, removed = irc.parseModes('+sn+ti', [])
self.assertEquals(added, [('s', None),
('n', None),
('t', None),
('i', None)])
self.assertEquals(removed, [])
def test_mismatchedParams(self):
"""
If the number of mode parameters does not match the number of modes
expecting parameters, L{irc.IRCBadModes} is raised.
"""
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+k', [],
self.paramModes)
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+kl', ['foo', '10', 'lulz_extra_param'],
self.paramModes)
def test_parameters(self):
"""
Modes which require parameters are parsed and paired with their relevant
parameter, modes which do not require parameters do not consume any of
the parameters.
"""
added, removed = irc.parseModes(
'+klbb',
['somekey', '42', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [('k', 'somekey'),
('l', '42'),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEquals(removed, [])
added, removed = irc.parseModes(
'-klbb',
['nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [])
self.assertEquals(removed, [('k', None),
('l', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
# Mix a no-argument mode in with argument modes.
added, removed = irc.parseModes(
'+knbb',
['somekey', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [('k', 'somekey'),
('n', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEquals(removed, [])
stringSubjects = [
"Hello, this is a nice string with no complications.",
"xargs%(NUL)smight%(NUL)slike%(NUL)sthis" % {'NUL': irc.NUL },
"embedded%(CR)snewline%(CR)s%(NL)sFUN%(NL)s" % {'CR': irc.CR,
'NL': irc.NL},
"escape!%(X)s escape!%(M)s %(X)s%(X)sa %(M)s0" % {'X': irc.X_QUOTE,
'M': irc.M_QUOTE}
]
class QuotingTest(unittest.TestCase):
def test_lowquoteSanity(self):
"""Testing client-server level quote/dequote"""
for s in stringSubjects:
self.failUnlessEqual(s, irc.lowDequote(irc.lowQuote(s)))
def test_ctcpquoteSanity(self):
"""Testing CTCP message level quote/dequote"""
for s in stringSubjects:
self.failUnlessEqual(s, irc.ctcpDequote(irc.ctcpQuote(s)))
class Dispatcher(irc._CommandDispatcherMixin):
"""
A dispatcher that exposes one known command and handles unknown commands.
"""
prefix = 'disp'
def disp_working(self, a, b):
"""
A known command that returns its input.
"""
return a, b
def disp_unknown(self, name, a, b):
"""
Handle unknown commands by returning their name and inputs.
"""
return name, a, b
class DispatcherTests(unittest.TestCase):
"""
Tests for L{irc._CommandDispatcherMixin}.
"""
def test_dispatch(self):
"""
Dispatching a command invokes the correct handler.
"""
disp = Dispatcher()
args = (1, 2)
res = disp.dispatch('working', *args)
self.assertEquals(res, args)
def test_dispatchUnknown(self):
"""
Dispatching an unknown command invokes the default handler.
"""
disp = Dispatcher()
name = 'missing'
args = (1, 2)
res = disp.dispatch(name, *args)
self.assertEquals(res, (name,) + args)
def test_dispatchMissingUnknown(self):
"""
Dispatching an unknown command, when no default handler is present,
results in an exception being raised.
"""
disp = Dispatcher()
disp.disp_unknown = None
self.assertRaises(irc.UnhandledCommand, disp.dispatch, 'bar')
class ServerSupportedFeatureTests(unittest.TestCase):
"""
Tests for L{ServerSupportedFeatures} and related functions.
"""
def test_intOrDefault(self):
"""
L{_intOrDefault} converts values to C{int} if possible, otherwise
returns a default value.
"""
self.assertEquals(irc._intOrDefault(None), None)
self.assertEquals(irc._intOrDefault([]), None)
self.assertEquals(irc._intOrDefault(''), None)
self.assertEquals(irc._intOrDefault('hello', 5), 5)
self.assertEquals(irc._intOrDefault('123'), 123)
self.assertEquals(irc._intOrDefault(123), 123)
def test_splitParam(self):
"""
L{ServerSupportedFeatures._splitParam} splits ISUPPORT parameters
into key and values. Parameters without a separator are split into a
key and a list containing only the empty string. Escaped parameters
are unescaped.
"""
params = [('FOO', ('FOO', [''])),
('FOO=', ('FOO', [''])),
('FOO=1', ('FOO', ['1'])),
('FOO=1,2,3', ('FOO', ['1', '2', '3'])),
('FOO=A\\x20B', ('FOO', ['A B'])),
('FOO=\\x5Cx', ('FOO', ['\\x'])),
('FOO=\\', ('FOO', ['\\'])),
('FOO=\\n', ('FOO', ['\\n']))]
_splitParam = irc.ServerSupportedFeatures._splitParam
for param, expected in params:
res = _splitParam(param)
self.assertEquals(res, expected)
self.assertRaises(ValueError, _splitParam, 'FOO=\\x')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xNN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\x20\\x')
def test_splitParamArgs(self):
"""
L{ServerSupportedFeatures._splitParamArgs} splits ISUPPORT parameter
arguments into key and value. Arguments without a separator are
split into a key and an empty string.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C:', 'D'])
self.assertEquals(res, [('A', '1'),
('B', '2'),
('C', ''),
('D', '')])
def test_splitParamArgsProcessor(self):
"""
L{ServerSupportedFeatures._splitParamArgs} uses the argument processor
passed to it to convert ISUPPORT argument values to some more suitable
form.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C'],
irc._intOrDefault)
self.assertEquals(res, [('A', 1),
('B', 2),
('C', None)])
def test_parsePrefixParam(self):
"""
L{ServerSupportedFeatures._parsePrefixParam} parses the ISUPPORT PREFIX
parameter into a mapping from modes to prefix symbols, returns
C{None} if there is no parseable prefix parameter or raises
C{ValueError} if the prefix parameter is malformed.
"""
_parsePrefixParam = irc.ServerSupportedFeatures._parsePrefixParam
self.assertEquals(_parsePrefixParam(''), None)
self.assertRaises(ValueError, _parsePrefixParam, 'hello')
self.assertEquals(_parsePrefixParam('(ov)@+'),
{'o': ('@', 0),
'v': ('+', 1)})
def test_parseChanModesParam(self):
"""
L{ServerSupportedFeatures._parseChanModesParam} parses the ISUPPORT
CHANMODES parameter into a mapping from mode categories to mode
characters. Passing fewer than 4 parameters results in the empty string
for the relevant categories. Passing more than 4 parameters raises
C{ValueError}.
"""
_parseChanModesParam = irc.ServerSupportedFeatures._parseChanModesParam
self.assertEquals(
_parseChanModesParam([]),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEquals(
_parseChanModesParam(['b', 'k', 'l', 'imnpst']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': 'imnpst'})
self.assertEquals(
_parseChanModesParam(['b', 'k', 'l']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': ''})
self.assertRaises(
ValueError,
_parseChanModesParam, ['a', 'b', 'c', 'd', 'e'])
def test_parse(self):
"""
L{ServerSupportedFeatures.parse} changes the internal state of the
instance to reflect the features indicated by the parsed ISUPPORT
parameters, including unknown parameters and unsetting previously set
parameters.
"""
supported = irc.ServerSupportedFeatures()
supported.parse(['MODES=4',
'CHANLIMIT=#:20,&:10',
'INVEX',
'EXCEPTS=Z',
'UNKNOWN=A,B,C'])
self.assertEquals(supported.getFeature('MODES'), 4)
self.assertEquals(supported.getFeature('CHANLIMIT'),
[('#', 20),
('&', 10)])
self.assertEquals(supported.getFeature('INVEX'), 'I')
self.assertEquals(supported.getFeature('EXCEPTS'), 'Z')
self.assertEquals(supported.getFeature('UNKNOWN'), ('A', 'B', 'C'))
self.assertTrue(supported.hasFeature('INVEX'))
supported.parse(['-INVEX'])
self.assertFalse(supported.hasFeature('INVEX'))
# Unsetting a previously unset parameter should not be a problem.
supported.parse(['-INVEX'])
def _parse(self, features):
"""
Parse all specified features according to the ISUPPORT specifications.
@type features: C{list} of C{(featureName, value)}
@param features: Feature names and values to parse
@rtype: L{irc.ServerSupportedFeatures}
"""
supported = irc.ServerSupportedFeatures()
features = ['%s=%s' % (name, value or '')
for name, value in features]
supported.parse(features)
return supported
def _parseFeature(self, name, value=None):
"""
Parse a feature, with the given name and value, according to the
ISUPPORT specifications and return the parsed value.
"""
supported = self._parse([(name, value)])
return supported.getFeature(name)
def _testIntOrDefaultFeature(self, name, default=None):
"""
Perform some common tests on a feature known to use L{_intOrDefault}.
"""
self.assertEquals(
self._parseFeature(name, None),
default)
self.assertEquals(
self._parseFeature(name, 'notanint'),
default)
self.assertEquals(
self._parseFeature(name, '42'),
42)
def _testFeatureDefault(self, name, features=None):
"""
Features known to have default values are reported as being present by
L{irc.ServerSupportedFeatures.hasFeature}, and their value defaults
correctly, when they don't appear in an ISUPPORT message.
"""
default = irc.ServerSupportedFeatures()._features[name]
if features is None:
features = [('DEFINITELY_NOT', 'a_feature')]
supported = self._parse(features)
self.assertTrue(supported.hasFeature(name))
self.assertEquals(supported.getFeature(name), default)
def test_support_CHANMODES(self):
"""
The CHANMODES ISUPPORT parameter is parsed into a C{dict} giving the
four mode categories, C{'addressModes'}, C{'param'}, C{'setParam'}, and
C{'noParam'}.
"""
self._testFeatureDefault('CHANMODES')
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,')])
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,ha,ha')])
self.assertEquals(
self._parseFeature('CHANMODES', ''),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEquals(
self._parseFeature('CHANMODES', ',A'),
{'addressModes': '',
'param': 'A',
'setParam': '',
'noParam': ''})
self.assertEquals(
self._parseFeature('CHANMODES', 'A,Bc,Def,Ghij'),
{'addressModes': 'A',
'param': 'Bc',
'setParam': 'Def',
'noParam': 'Ghij'})
def test_support_IDCHAN(self):
"""
The IDCHAN support parameter is parsed into a sequence of two-tuples
giving channel prefix and ID length pairs.
"""
self.assertEquals(
self._parseFeature('IDCHAN', '!:5'),
[('!', '5')])
def test_support_MAXLIST(self):
"""
The MAXLIST support parameter is parsed into a sequence of two-tuples
giving modes and their limits.
"""
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50'),
[('b', 25), ('eI', 50)])
# A non-integer parameter argument results in None.
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:3.1415'),
[('b', 25), ('eI', 50), ('a', None)])
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:notanint'),
[('b', 25), ('eI', 50), ('a', None)])
def test_support_NETWORK(self):
"""
The NETWORK support parameter is parsed as the network name, as
specified by the server.
"""
self.assertEquals(
self._parseFeature('NETWORK', 'IRCNet'),
'IRCNet')
def test_support_SAFELIST(self):
"""
The SAFELIST support parameter is parsed into a boolean indicating
whether the safe "list" command is supported or not.
"""
self.assertEquals(
self._parseFeature('SAFELIST'),
True)
def test_support_STATUSMSG(self):
"""
The STATUSMSG support parameter is parsed into a string of channel
status prefixes that support the exclusive channel notice method.
"""
self.assertEquals(
self._parseFeature('STATUSMSG', '@+'),
'@+')
def test_support_TARGMAX(self):
"""
The TARGMAX support parameter is parsed into a dictionary, mapping
strings to integers, of the maximum number of targets for a particular
command.
"""
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3'),
{'PRIVMSG': 4,
'NOTICE': 3})
# A non-integer parameter argument results in None.
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:3.1415'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:notanint'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
def test_support_NICKLEN(self):
"""
The NICKLEN support parameter is parsed into an integer value
indicating the maximum length of a nickname the client may use,
otherwise, if the parameter is missing or invalid, the default value
(as specified by RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['NICKLEN']
self._testIntOrDefaultFeature('NICKLEN', default)
def test_support_CHANNELLEN(self):
"""
The CHANNELLEN support parameter is parsed into an integer value
indicating the maximum channel name length, otherwise, if the
parameter is missing or invalid, the default value (as specified by
RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['CHANNELLEN']
self._testIntOrDefaultFeature('CHANNELLEN', default)
def test_support_CHANTYPES(self):
"""
The CHANTYPES support parameter is parsed into a tuple of
valid channel prefix characters.
"""
self._testFeatureDefault('CHANTYPES')
self.assertEquals(
self._parseFeature('CHANTYPES', '#&%'),
('#', '&', '%'))
def test_support_KICKLEN(self):
"""
The KICKLEN support parameter is parsed into an integer value
indicating the maximum length of a kick message a client may use.
"""
self._testIntOrDefaultFeature('KICKLEN')
def test_support_PREFIX(self):
"""
The PREFIX support parameter is parsed into a dictionary mapping
modes to two-tuples of status symbol and priority.
"""
self._testFeatureDefault('PREFIX')
self._testFeatureDefault('PREFIX', [('PREFIX', 'hello')])
self.assertEquals(
self._parseFeature('PREFIX', None),
None)
self.assertEquals(
self._parseFeature('PREFIX', '(ohv)@%+'),
{'o': ('@', 0),
'h': ('%', 1),
'v': ('+', 2)})
self.assertEquals(
self._parseFeature('PREFIX', '(hov)@%+'),
{'o': ('%', 1),
'h': ('@', 0),
'v': ('+', 2)})
def test_support_TOPICLEN(self):
"""
The TOPICLEN support parameter is parsed into an integer value
indicating the maximum length of a topic a client may set.
"""
self._testIntOrDefaultFeature('TOPICLEN')
def test_support_MODES(self):
"""
The MODES support parameter is parsed into an integer value
indicating the maximum number of "variable" modes (defined as being
modes from C{addressModes}, C{param} or C{setParam} categories for
the C{CHANMODES} ISUPPORT parameter) which may by set on a channel
by a single MODE command from a client.
"""
self._testIntOrDefaultFeature('MODES')
def test_support_EXCEPTS(self):
"""
The EXCEPTS support parameter is parsed into the mode character
to be used for "ban exception" modes. If no parameter is specified
then the character C{e} is assumed.
"""
self.assertEquals(
self._parseFeature('EXCEPTS', 'Z'),
'Z')
self.assertEquals(
self._parseFeature('EXCEPTS'),
'e')
def test_support_INVEX(self):
"""
The INVEX support parameter is parsed into the mode character to be
used for "invite exception" modes. If no parameter is specified then
the character C{I} is assumed.
"""
self.assertEquals(
self._parseFeature('INVEX', 'Z'),
'Z')
self.assertEquals(
self._parseFeature('INVEX'),
'I')
class IRCClientWithoutLogin(irc.IRCClient):
performLogin = 0
class CTCPTest(unittest.TestCase):
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = IRCClientWithoutLogin()
self.client.makeConnection(self.transport)
def test_ERRMSG(self):
"""Testing CTCP query ERRMSG.
Not because this is an especially important case in the
field, but it does go through the entire dispatch/decode/encode
process.
"""
errQuery = (":[email protected] PRIVMSG #theChan :"
"%(X)cERRMSG t%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
errReply = ("NOTICE nick :%(X)cERRMSG t :"
"No error has occoured.%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
self.client.dataReceived(errQuery)
reply = self.file.getvalue()
self.failUnlessEqual(errReply, reply)
def test_noNumbersVERSION(self):
"""
If attributes for version information on L{IRCClient} are set to
C{None}, the parts of the CTCP VERSION response they correspond to
are omitted.
"""
self.client.versionName = "FrobozzIRC"
self.client.ctcpQuery_VERSION("[email protected]", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s::"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName})
reply = self.file.getvalue()
self.assertEquals(versionReply, reply)
def test_fullVERSION(self):
"""
The response to a CTCP VERSION query includes the version number and
environment information, as specified by L{IRCClient.versionNum} and
L{IRCClient.versionEnv}.
"""
self.client.versionName = "FrobozzIRC"
self.client.versionNum = "1.2g"
self.client.versionEnv = "ZorkOS"
self.client.ctcpQuery_VERSION("[email protected]", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s:%(vnum)s:%(venv)s"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName,
'vnum': self.client.versionNum,
'venv': self.client.versionEnv})
reply = self.file.getvalue()
self.assertEquals(versionReply, reply)
def tearDown(self):
self.transport.loseConnection()
self.client.connectionLost()
del self.client
del self.transport
class NoticingClient(IRCClientWithoutLogin, object):
methods = {
'created': ('when',),
'yourHost': ('info',),
'myInfo': ('servername', 'version', 'umodes', 'cmodes'),
'luserClient': ('info',),
'bounce': ('info',),
'isupport': ('options',),
'luserChannels': ('channels',),
'luserOp': ('ops',),
'luserMe': ('info',),
'receivedMOTD': ('motd',),
'privmsg': ('user', 'channel', 'message'),
'joined': ('channel',),
'left': ('channel',),
'noticed': ('user', 'channel', 'message'),
'modeChanged': ('user', 'channel', 'set', 'modes', 'args'),
'pong': ('user', 'secs'),
'signedOn': (),
'kickedFrom': ('channel', 'kicker', 'message'),
'nickChanged': ('nick',),
'userJoined': ('user', 'channel'),
'userLeft': ('user', 'channel'),
'userKicked': ('user', 'channel', 'kicker', 'message'),
'action': ('user', 'channel', 'data'),
'topicUpdated': ('user', 'channel', 'newTopic'),
'userRenamed': ('oldname', 'newname')}
def __init__(self, *a, **kw):
# It is important that IRCClient.__init__ is not called here: historically
# it did not exist, so nothing may be initialised in it that would break
# subclasses which do not (or cannot) invoke the base implementation. Any
# protocol initialisation should happen in connectionMade.
self.calls = []
def __getattribute__(self, name):
if name.startswith('__') and name.endswith('__'):
return super(NoticingClient, self).__getattribute__(name)
try:
args = super(NoticingClient, self).__getattribute__('methods')[name]
except KeyError:
return super(NoticingClient, self).__getattribute__(name)
else:
return self.makeMethod(name, args)
def makeMethod(self, fname, args):
def method(*a, **kw):
if len(a) > len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
for (name, value) in zip(args, a):
if name in kw:
raise TypeError("TypeError: %s() got multiple values "
"for keyword argument '%s'" % (fname, name))
else:
kw[name] = value
if len(kw) != len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
self.calls.append((fname, kw))
return method
def pop(dict, key, default):
try:
value = dict[key]
except KeyError:
return default
else:
del dict[key]
return value
class ClientImplementationTests(unittest.TestCase):
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = NoticingClient()
self.client.makeConnection(self.transport)
def tearDown(self):
self.transport.loseConnection()
self.client.connectionLost()
del self.client
del self.transport
def _serverTestImpl(self, code, msg, func, **kw):
host = pop(kw, 'host', 'server.host')
nick = pop(kw, 'nick', 'nickname')
args = pop(kw, 'args', '')
message = (":" +
host + " " +
code + " " +
nick + " " +
args + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEquals(
self.client.calls,
[(func, kw)])
def testYourHost(self):
msg = "Your host is some.host[blah.blah/6667], running version server-version-3"
self._serverTestImpl("002", msg, "yourHost", info=msg)
def testCreated(self):
msg = "This server was cobbled together Fri Aug 13 18:00:25 UTC 2004"
self._serverTestImpl("003", msg, "created", when=msg)
def testMyInfo(self):
msg = "server.host server-version abcDEF bcdEHI"
self._serverTestImpl("004", msg, "myInfo",
servername="server.host",
version="server-version",
umodes="abcDEF",
cmodes="bcdEHI")
def testLuserClient(self):
msg = "There are 9227 victims and 9542 hiding on 24 servers"
self._serverTestImpl("251", msg, "luserClient",
info=msg)
def _sendISUPPORT(self):
args = ("MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10 HOSTLEN=63 "
"TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23 CHANTYPES=# "
"PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer")
msg = "are available on this server"
self._serverTestImpl("005", msg, "isupport", args=args,
options=['MODES=4',
'CHANLIMIT=#:20',
'NICKLEN=16',
'USERLEN=10',
'HOSTLEN=63',
'TOPICLEN=450',
'KICKLEN=450',
'CHANNELLEN=30',
'KEYLEN=23',
'CHANTYPES=#',
'PREFIX=(ov)@+',
'CASEMAPPING=ascii',
'CAPAB',
'IRCD=dancer'])
def test_ISUPPORT(self):
"""
The client parses ISUPPORT messages sent by the server and calls
L{IRCClient.isupport}.
"""
self._sendISUPPORT()
def testBounce(self):
msg = "Try server some.host, port 321"
self._serverTestImpl("010", msg, "bounce",
info=msg)
def testLuserChannels(self):
args = "7116"
msg = "channels formed"
self._serverTestImpl("254", msg, "luserChannels", args=args,
channels=int(args))
def testLuserOp(self):
args = "34"
msg = "flagged staff members"
self._serverTestImpl("252", msg, "luserOp", args=args,
ops=int(args))
def testLuserMe(self):
msg = "I have 1937 clients and 0 servers"
self._serverTestImpl("255", msg, "luserMe",
info=msg)
def test_receivedMOTD(self):
"""
Lines received in I{RPL_MOTDSTART} and I{RPL_MOTD} are delivered to
L{IRCClient.receivedMOTD} when I{RPL_ENDOFMOTD} is received.
"""
lines = [
":host.name 375 nickname :- host.name Message of the Day -",
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.assertEquals(self.client.calls, [])
self.client.dataReceived(L + '\r\n')
self.assertEquals(
self.client.calls,
[("receivedMOTD", {"motd": ["host.name Message of the Day -", "Welcome to host.name"]})])
# After the motd is delivered, the tracking variable should be
# reset.
self.assertIdentical(self.client.motd, None)
def test_withoutMOTDSTART(self):
"""
If L{IRCClient} receives I{RPL_MOTD} and I{RPL_ENDOFMOTD} without
receiving I{RPL_MOTDSTART}, L{IRCClient.receivedMOTD} is still
called with a list of MOTD lines.
"""
lines = [
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.client.dataReceived(L + '\r\n')
self.assertEquals(
self.client.calls,
[("receivedMOTD", {"motd": ["Welcome to host.name"]})])
def _clientTestImpl(self, sender, group, type, msg, func, **kw):
ident = pop(kw, 'ident', 'ident')
host = pop(kw, 'host', 'host')
wholeUser = sender + '!' + ident + '@' + host
message = (":" +
wholeUser + " " +
type + " " +
group + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEquals(
self.client.calls,
[(func, kw)])
self.client.calls = []
def testPrivmsg(self):
msg = "Tooty toot toot."
self._clientTestImpl("sender", "#group", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="#group",
message=msg)
self._clientTestImpl("sender", "recipient", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="recipient",
message=msg)
def test_getChannelModeParams(self):
"""
L{IRCClient.getChannelModeParams} uses ISUPPORT information, either
given by the server or defaults, to determine which channel modes
require arguments when being added or removed.
"""
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, ['b', 'h', 'k', 'l', 'o', 'v'])
self.assertEquals(remove, ['b', 'h', 'o', 'v'])
def removeFeature(name):
name = '-' + name
msg = "are available on this server"
self._serverTestImpl(
'005', msg, 'isupport', args=name, options=[name])
self.assertIdentical(
self.client.supported.getFeature(name), None)
self.client.calls = []
# Remove CHANMODES feature, causing getFeature('CHANMODES') to return
# None.
removeFeature('CHANMODES')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, ['h', 'o', 'v'])
self.assertEquals(remove, ['h', 'o', 'v'])
# Remove PREFIX feature, causing getFeature('PREFIX') to return None.
removeFeature('PREFIX')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, [])
self.assertEquals(remove, [])
# Restore ISUPPORT features.
self._sendISUPPORT()
self.assertNotIdentical(
self.client.supported.getFeature('PREFIX'), None)
def test_getUserModeParams(self):
"""
L{IRCClient.getUserModeParams} returns a list of user modes (modes that
the user sets on themself, outside of channel modes) that require
parameters when added and removed, respectively.
"""
add, remove = map(sorted, self.client.getUserModeParams())
self.assertEquals(add, [])
self.assertEquals(remove, [])
def _sendModeChange(self, msg, args='', target=None):
"""
Build a MODE string and send it to the client.
"""
if target is None:
target = '#chan'
message = ":[email protected] MODE %s %s %s\r\n" % (
target, msg, args)
self.client.dataReceived(message)
def _parseModeChange(self, results, target=None):
"""
Parse the results, perform some common assertions, and return the data to check.
"""
if target is None:
target = '#chan'
for n, result in enumerate(results):
method, data = result
self.assertEquals(method, 'modeChanged')
self.assertEquals(data['user'], '[email protected]')
self.assertEquals(data['channel'], target)
results[n] = tuple([data[key] for key in ('set', 'modes', 'args')])
return results
def _checkModeChange(self, expected, target=None):
"""
Compare the expected result with the one returned by the client.
"""
result = self._parseModeChange(self.client.calls, target)
self.assertEquals(result, expected)
self.client.calls = []
def test_modeMissingDirection(self):
"""
Mode strings that do not begin with a directional character, C{'+'} or
C{'-'}, have C{'+'} automatically prepended.
"""
self._sendModeChange('s')
self._checkModeChange([(True, 's', (None,))])
def test_noModeParameters(self):
"""
No parameters are passed to L{IRCClient.modeChanged} for modes that
don't take any parameters.
"""
self._sendModeChange('-s')
self._checkModeChange([(False, 's', (None,))])
self._sendModeChange('+n')
self._checkModeChange([(True, 'n', (None,))])
def test_oneModeParameter(self):
"""
Parameters are passed to L{IRCClient.modeChanged} for modes that take
parameters.
"""
self._sendModeChange('+o', 'a_user')
self._checkModeChange([(True, 'o', ('a_user',))])
self._sendModeChange('-o', 'a_user')
self._checkModeChange([(False, 'o', ('a_user',))])
def test_mixedModes(self):
"""
Mixing adding and removing modes that do and don't take parameters
invokes L{IRCClient.modeChanged} with mode characters and parameters
that match up.
"""
self._sendModeChange('+osv', 'a_user another_user')
self._checkModeChange([(True, 'osv', ('a_user', None, 'another_user'))])
self._sendModeChange('+v-os', 'a_user another_user')
self._checkModeChange([(True, 'v', ('a_user',)),
(False, 'os', ('another_user', None))])
def test_tooManyModeParameters(self):
"""
Passing an argument to modes that take no parameters results in
L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+s', 'wrong')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEquals(len(errors), 1)
self.assertSubstring(
'Too many parameters', errors[0].getErrorMessage())
def test_tooFewModeParameters(self):
"""
Passing no arguments to modes that do take parameters results in
L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+o')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEquals(len(errors), 1)
self.assertSubstring(
'Not enough parameters', errors[0].getErrorMessage())
def test_userMode(self):
"""
A C{MODE} message whose target is our user (the nickname of our user,
to be precise), as opposed to a channel, will be parsed according to
the modes specified by L{IRCClient.getUserModeParams}.
"""
target = self.client.nickname
# Mode "o" on channels is supposed to take a parameter, but since this
# is not a channel this will not cause an exception.
self._sendModeChange('+o', target=target)
self._checkModeChange([(True, 'o', (None,))], target=target)
def getUserModeParams():
return ['Z', '']
# Introduce our own user mode that takes an argument.
self.patch(self.client, 'getUserModeParams', getUserModeParams)
self._sendModeChange('+Z', 'an_arg', target=target)
self._checkModeChange([(True, 'Z', ('an_arg',))], target=target)
class BasicServerFunctionalityTestCase(unittest.TestCase):
def setUp(self):
self.f = StringIOWithoutClosing()
self.t = protocol.FileWrapper(self.f)
self.p = irc.IRC()
self.p.makeConnection(self.t)
def check(self, s):
self.assertEquals(self.f.getvalue(), s)
def testPrivmsg(self):
self.p.privmsg("this-is-sender", "this-is-recip", "this is message")
self.check(":this-is-sender PRIVMSG this-is-recip :this is message\r\n")
def testNotice(self):
self.p.notice("this-is-sender", "this-is-recip", "this is notice")
self.check(":this-is-sender NOTICE this-is-recip :this is notice\r\n")
def testAction(self):
self.p.action("this-is-sender", "this-is-recip", "this is action")
self.check(":this-is-sender ACTION this-is-recip :this is action\r\n")
def testJoin(self):
self.p.join("this-person", "#this-channel")
self.check(":this-person JOIN #this-channel\r\n")
def testPart(self):
self.p.part("this-person", "#that-channel")
self.check(":this-person PART #that-channel\r\n")
def testWhois(self):
"""
Verify that a whois by the client receives the right protocol actions
from the server.
"""
timestamp = int(time.time()-100)
hostname = self.p.hostname
req = 'requesting-nick'
targ = 'target-nick'
self.p.whois(req, targ, 'target', 'host.com',
'Target User', 'irc.host.com', 'A fake server', False,
12, timestamp, ['#fakeusers', '#fakemisc'])
expected = '\r\n'.join([
':%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User',
':%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server',
':%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time',
':%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc',
':%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.',
'']) % dict(hostname=hostname, timestamp=timestamp, req=req, targ=targ)
self.check(expected)
class DummyClient(irc.IRCClient):
def __init__(self):
self.lines = []
def sendLine(self, m):
self.lines.append(m)
class ClientInviteTests(unittest.TestCase):
"""
Tests for L{IRCClient.invite}.
"""
def setUp(self):
"""
Create a L{DummyClient} to call C{invite} on in test methods.
"""
self.client = DummyClient()
def test_channelCorrection(self):
"""
If the channel name passed to L{IRCClient.invite} does not begin with a
channel prefix character, one is prepended to it.
"""
self.client.invite('foo', 'bar')
self.assertEquals(self.client.lines, ['INVITE foo #bar'])
def test_invite(self):
"""
L{IRCClient.invite} sends an I{INVITE} message with the specified
username and a channel.
"""
self.client.invite('foo', '#bar')
self.assertEquals(self.client.lines, ['INVITE foo #bar'])
class ClientMsgTests(unittest.TestCase):
def setUp(self):
self.client = DummyClient()
def testSingleLine(self):
self.client.msg('foo', 'bar')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def testDodgyMaxLength(self):
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 0)
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 3)
def testMultipleLine(self):
maxLen = len('PRIVMSG foo :') + 3 + 2 # 2 for line endings
self.client.msg('foo', 'barbazbo', maxLen)
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar',
'PRIVMSG foo :baz',
'PRIVMSG foo :bo'])
def testSufficientWidth(self):
msg = 'barbazbo'
maxLen = len('PRIVMSG foo :%s' % (msg,)) + 2
self.client.msg('foo', msg, maxLen)
self.assertEquals(self.client.lines, ['PRIVMSG foo :%s' % (msg,)])
self.client.lines = []
self.client.msg('foo', msg, maxLen-1)
self.assertEquals(2, len(self.client.lines))
self.client.lines = []
self.client.msg('foo', msg, maxLen+1)
self.assertEquals(1, len(self.client.lines))
def test_newlinesAtStart(self):
"""
An LF at the beginning of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', '\nbar')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesAtEnd(self):
"""
An LF at the end of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesWithinMessage(self):
"""
An LF within a message causes a new line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEquals(self.client.lines, [
'PRIVMSG foo :bar',
'PRIVMSG foo :baz'
])
def test_consecutiveNewlines(self):
"""
Consecutive LFs do not cause a blank line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEquals(self.client.lines, [
'PRIVMSG foo :bar',
'PRIVMSG foo :baz',
])
def _lengthLimitExceededTest(self, *args):
# The maximum length of a line is 512 bytes, including the line prefix
# and the trailing CRLF.
maxLineLength = irc.MAX_COMMAND_LENGTH - 2 - len('PRIVMSG foo :')
self.client.msg('foo', 'o' * (maxLineLength + 1), *args)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + maxLineLength * 'o',
'PRIVMSG foo :o',
])
def test_longLinesCauseNewLines(self):
"""
Lines that would break the 512-byte barrier cause two lines to be sent.
"""
self._lengthLimitExceededTest()
def test_lengthLimitNone(self):
"""
If C{None} is passed to L{IRCClient.msg} as the length limit, the
default limit of C{MAX_COMMAND_LENGTH} is used.
"""
self._lengthLimitExceededTest(None)
def test_newlinesBeforeLineBreaking(self):
"""
IRCClient breaks on newlines before it breaks long lines.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + '\n' + longline)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline,
])
def test_lineBreakOnWordBoundaries(self):
"""
IRCClient prefers to break long lines at word boundaries.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + ' ' + longline)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline,
])
def testSplitSanity(self):
# Whiteboxing
self.assertRaises(ValueError, irc.split, 'foo', -1)
self.assertRaises(ValueError, irc.split, 'foo', 0)
self.assertEquals([], irc.split('', 1))
self.assertEquals([], irc.split(''))
def test_splitDelimiters(self):
"""
Test that split() skips any delimiter (space or newline) that it finds
at the very beginning of the string segment it is operating on.
Nothing should be added to the output list because of it.
"""
r = irc.split("xx yyz", 2)
self.assertEquals(['xx', 'yy', 'z'], r)
r = irc.split("xx\nyyz", 2)
self.assertEquals(['xx', 'yy', 'z'], r)
def test_splitValidatesLength(self):
"""
split() raises ValueError if given a length <= 0
"""
self.assertRaises(ValueError, irc.split, "foo", 0)
self.assertRaises(ValueError, irc.split, "foo", -1)
def test_say(self):
"""
L{IRCClient.say} prepends the channel prefix C{"#"} if necessary and
then sends the message to the server for delivery to that channel.
"""
self.client.say("thechannel", "the message")
self.assertEquals(self.client.lines, ["PRIVMSG #thechannel :the message"])
class ClientTests(TestCase):
"""
Tests for the protocol-level behavior of IRCClient methods intended to
be called by application code.
"""
def setUp(self):
"""
Create and connect a new L{IRCClient} to a new L{StringTransport}.
"""
self.transport = StringTransport()
self.protocol = IRCClient()
self.protocol.performLogin = False
self.protocol.makeConnection(self.transport)
# Sanity check - we don't want anything to have happened at this
# point, since we're not in a test yet.
self.assertEquals(self.transport.value(), "")
def getLastLine(self, transport):
"""
Return the last IRC message in the transport buffer.
"""
return transport.value().split('\r\n')[-2]
def test_away(self):
"""
L{IRCClient.away} sends an AWAY command with the specified message.
"""
message = "Sorry, I'm not here."
self.protocol.away(message)
expected = [
'AWAY :%s' % (message,),
'',
]
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_back(self):
"""
L{IRCClient.back} sends an AWAY command with an empty message.
"""
self.protocol.back()
expected = [
'AWAY :',
'',
]
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_whois(self):
"""
L{IRCClient.whois} sends a WHOIS message.
"""
self.protocol.whois('alice')
self.assertEquals(
self.transport.value().split('\r\n'),
['WHOIS alice', ''])
def test_whoisWithServer(self):
"""
L{IRCClient.whois} sends a WHOIS message with a server name if a
value is passed for the C{server} parameter.
"""
self.protocol.whois('alice', 'example.org')
self.assertEquals(
self.transport.value().split('\r\n'),
['WHOIS example.org alice', ''])
def test_register(self):
"""
L{IRCClient.register} sends NICK and USER commands with the
username, name, hostname, server name, and real name specified.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = None
self.protocol.register(username, hostname, servername)
expected = [
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_registerWithPassword(self):
"""
If the C{password} attribute of L{IRCClient} is not C{None}, the
C{register} method also sends a PASS command with it as the
argument.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
expected = [
'PASS %s' % (self.protocol.password,),
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_registerWithTakenNick(self):
"""
Verify that the client repeats the L{IRCClient.setNick} method with a
new value when presented with an C{ERR_NICKNAMEINUSE} while trying to
register.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertNotEquals(lastLine, 'NICK %s' % (username,))
# Keep chaining underscores for each collision
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(lastLine, 'NICK %s' % (username + '__',))
def test_overrideAlterCollidedNick(self):
"""
L{IRCClient.alterCollidedNick} determines how a nickname is altered upon
collision while a user is trying to change to that nickname.
"""
nick = 'foo'
self.protocol.alterCollidedNick = lambda nick: nick + '***'
self.protocol.register(nick)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (nick + '***',))
def test_nickChange(self):
"""
When a NICK command is sent after signon, C{IRCClient.nickname} is set
to the new nickname I{after} the server sends an acknowledgement.
"""
oldnick = 'foo'
newnick = 'bar'
self.protocol.register(oldnick)
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.protocol.setNick(newnick)
self.assertEquals(self.protocol.nickname, oldnick)
self.protocol.irc_NICK('%s!quux@qux' % (oldnick,), [newnick])
self.assertEquals(self.protocol.nickname, newnick)
def test_erroneousNick(self):
"""
Trying to register an illegal nickname results in the default legal
nickname being set, and trying to change a nickname to an illegal
nickname results in the old nickname being kept.
"""
# Registration case: change illegal nickname to erroneousNickFallback
badnick = 'foo'
self.assertEquals(self.protocol._registered, False)
self.protocol.register(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (self.protocol.erroneousNickFallback,))
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.assertEquals(self.protocol._registered, True)
self.protocol.setNick(self.protocol.erroneousNickFallback)
self.assertEquals(
self.protocol.nickname, self.protocol.erroneousNickFallback)
# Illegal nick change attempt after registration. Fall back to the old
# nickname instead of erroneousNickFallback.
oldnick = self.protocol.nickname
self.protocol.setNick(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (badnick,))
self.assertEquals(self.protocol.nickname, oldnick)
def test_describe(self):
"""
L{IRCClient.describe} sends a CTCP ACTION message to the target
specified.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.describe(target, action)
self.protocol.describe(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % (target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_me(self):
"""
L{IRCClient.me} sends a CTCP ACTION message to the target channel
specified.
If the target does not begin with a standard channel prefix,
'#' is prepended.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.me(target, action)
self.protocol.me(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % ('#' + target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
warnings = self.flushWarnings(
offendingFunctions=[self.test_me])
self.assertEquals(
warnings[0]['message'],
"me() is deprecated since Twisted 9.0. Use IRCClient.describe().")
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(len(warnings), 2)
def test_noticedDoesntPrivmsg(self):
"""
The default implementation of L{IRCClient.noticed} doesn't invoke
C{privmsg()}
"""
def privmsg(user, channel, message):
self.fail("privmsg() should not have been called")
self.protocol.privmsg = privmsg
self.protocol.irc_NOTICE('spam', "I don't want any spam!")
|
the-stack_106_21483
|
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""ไธไธชๅฏน้ฃ่นๆๅค็ไฝ็ฝฎๅๅปบไธไธชๅญๅผนๅฏน่ฑก"""
def __init__(self, ai_settings, screen, ship):
"""ๅจ้ฃ่นๆๅค็ไฝ็ฝฎๅๅปบไธไธชๅญๅผนๅฏน่ฑก"""
super().__init__()
self.screen = screen
# Create a rect representing the bullet at (0, 0), then set the correct position
self.rect = pygame.Rect(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
# Store the bullet's position as a float
self.y = float(self.rect.y)
self.color = ai_settings.bullet_color
self.speed_factor = ai_settings.bullet_speed_factor
def update(self):
"""ๅไธ็งปๅจๅญๅผน"""
# ๆดๆฐ่กจ็คบๅญๅผนไฝ็ฝฎ็ๅฐๆฐๅผ
self.y -= self.speed_factor
# Update the position of the bullet's rect
self.rect.y = self.y
def draw_bullet(self):
"""ๅจๅฑๅนไธ็ปๅถๅญๅผน"""
pygame.draw.rect(self.screen, self.color, self.rect)
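# Hedged usage sketch (not part of the original module): it assumes the
# calling game loop already has an `ai_settings` object, a pygame `screen`
# surface and a `ship` sprite, and keeps bullets in a pygame sprite Group.
#
#     bullets = pygame.sprite.Group()
#     bullets.add(Bullet(ai_settings, screen, ship))   # fire a bullet
#     bullets.update()                                 # move every bullet up
#     for bullet in bullets.copy():
#         if bullet.rect.bottom <= 0:                  # off the top of the screen
#             bullets.remove(bullet)
#     for bullet in bullets.sprites():
#         bullet.draw_bullet()                         # redraw remaining bullets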
|
the-stack_106_21484
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 21:51:31 2019
@author: Medeea
"""
#Simple Linear Regression
#Importing the three libraries
#NumPy for numerical and mathematical tools
import numpy as np
#Matplotlib for plotting charts
import matplotlib.pyplot as plt
#Pandas for importing datasets
import pandas as pd
#importing the dataset
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
#Splitting the dataset into the Training set and Test set
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
#Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
#Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
#Predicting the Test set results
y_pred = regressor.predict(X_test)
#Visualising the Training set results
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
#Visualising the Test set results
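#The file ends after the comment above; a plausible completion, mirroring the
#training-set plot but scattering the test observations, would be:
plt.scatter(X_test, y_test, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()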
|
the-stack_106_21485
|
import threading
from contextlib import contextmanager
import os
from os.path import abspath, join as pjoin
import shutil
from subprocess import check_call, check_output, STDOUT
import sys
from tempfile import mkdtemp
from . import compat
from .in_process import _in_proc_script_path
__all__ = [
'BackendUnavailable',
'BackendInvalid',
'HookMissing',
'UnsupportedOperation',
'default_subprocess_runner',
'quiet_subprocess_runner',
'Pep517HookCaller',
]
@contextmanager
def tempdir():
td = mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
class BackendUnavailable(Exception):
"""Will be raised if the backend cannot be imported in the hook process."""
def __init__(self, traceback):
self.traceback = traceback
class BackendInvalid(Exception):
"""Will be raised if the backend is invalid."""
def __init__(self, backend_name, backend_path, message):
self.backend_name = backend_name
self.backend_path = backend_path
self.message = message
class HookMissing(Exception):
"""Will be raised on missing hooks."""
def __init__(self, hook_name):
super(HookMissing, self).__init__(hook_name)
self.hook_name = hook_name
class UnsupportedOperation(Exception):
"""May be raised by build_sdist if the backend indicates that it can't."""
def __init__(self, traceback):
self.traceback = traceback
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""The default method of calling the wrapper subprocess."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_call(cmd, cwd=cwd, env=env)
def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""A method of calling the wrapper subprocess while suppressing output."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
def norm_and_check(source_tree, requested):
"""Normalise and check a backend path.
Ensure that the requested backend path is specified as a relative path,
and resolves to a location under the given source tree.
Return an absolute version of the requested path.
"""
if os.path.isabs(requested):
raise ValueError("paths must be relative")
abs_source = os.path.abspath(source_tree)
abs_requested = os.path.normpath(os.path.join(abs_source, requested))
# We have to use commonprefix for Python 2.7 compatibility. So we
# normalise case to avoid problems because commonprefix is a character
# based comparison :-(
norm_source = os.path.normcase(abs_source)
norm_requested = os.path.normcase(abs_requested)
if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
raise ValueError("paths must be inside source tree")
return abs_requested
class Pep517HookCaller(object):
"""A wrapper around a source directory to be built with a PEP 517 backend.
:param source_dir: The path to the source directory, containing
pyproject.toml.
:param build_backend: The build backend spec, as per PEP 517, from
pyproject.toml.
:param backend_path: The backend path, as per PEP 517, from pyproject.toml.
:param runner: A callable that invokes the wrapper subprocess.
:param python_executable: The Python executable used to invoke the backend
The 'runner', if provided, must expect the following:
- cmd: a list of strings representing the command and arguments to
execute, as would be passed to e.g. 'subprocess.check_call'.
- cwd: a string representing the working directory that must be
used for the subprocess. Corresponds to the provided source_dir.
- extra_environ: a dict mapping environment variable names to values
which must be set for the subprocess execution.
"""
def __init__(
self,
source_dir,
build_backend,
backend_path=None,
runner=None,
python_executable=None,
):
if runner is None:
runner = default_subprocess_runner
self.source_dir = abspath(source_dir)
self.build_backend = build_backend
if backend_path:
backend_path = [
norm_and_check(self.source_dir, p) for p in backend_path
]
self.backend_path = backend_path
self._subprocess_runner = runner
if not python_executable:
python_executable = sys.executable
self.python_executable = python_executable
@contextmanager
def subprocess_runner(self, runner):
"""A context manager for temporarily overriding the default subprocess
runner.
"""
prev = self._subprocess_runner
self._subprocess_runner = runner
try:
yield
finally:
self._subprocess_runner = prev
def get_requires_for_build_wheel(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.::
["wheel >= 0.25", "setuptools"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_wheel', {
'config_settings': config_settings
})
def prepare_metadata_for_build_wheel(
self, metadata_directory, config_settings=None,
_allow_fallback=True):
"""Prepare a ``*.dist-info`` folder with metadata for this project.
Returns the name of the newly created folder.
If the build backend defines a hook with this name, it will be called
in a subprocess. If not, the backend will be asked to build a wheel,
and the dist-info extracted from that (unless _allow_fallback is
False).
"""
return self._call_hook('prepare_metadata_for_build_wheel', {
'metadata_directory': abspath(metadata_directory),
'config_settings': config_settings,
'_allow_fallback': _allow_fallback,
})
def build_wheel(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build a wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_wheel' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_wheel', and the same metadata_directory is
used, the previously built wheel will be copied to wheel_directory.
"""
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_wheel', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
})
def get_requires_for_build_sdist(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.::
["setuptools >= 26"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_sdist', {
'config_settings': config_settings
})
def build_sdist(self, sdist_directory, config_settings=None):
"""Build an sdist from this project.
Returns the name of the newly created file.
This calls the 'build_sdist' backend hook in a subprocess.
"""
return self._call_hook('build_sdist', {
'sdist_directory': abspath(sdist_directory),
'config_settings': config_settings,
})
def _call_hook(self, hook_name, kwargs):
# On Python 2, pytoml returns Unicode values (which is correct) but the
# environment passed to check_call needs to contain string values. We
# convert here by encoding using ASCII (the backend can only contain
# letters, digits and _, . and : characters, and will be used as a
# Python identifier, so non-ASCII content is wrong on Python 2 in
# any case).
# For backend_path, we use sys.getfilesystemencoding.
if sys.version_info[0] == 2:
build_backend = self.build_backend.encode('ASCII')
else:
build_backend = self.build_backend
extra_environ = {'PEP517_BUILD_BACKEND': build_backend}
if self.backend_path:
backend_path = os.pathsep.join(self.backend_path)
if sys.version_info[0] == 2:
backend_path = backend_path.encode(sys.getfilesystemencoding())
extra_environ['PEP517_BACKEND_PATH'] = backend_path
with tempdir() as td:
hook_input = {'kwargs': kwargs}
compat.write_json(hook_input, pjoin(td, 'input.json'),
indent=2)
# Run the hook in a subprocess
with _in_proc_script_path() as script:
python = self.python_executable
self._subprocess_runner(
[python, abspath(str(script)), hook_name, td],
cwd=self.source_dir,
extra_environ=extra_environ
)
data = compat.read_json(pjoin(td, 'output.json'))
if data.get('unsupported'):
raise UnsupportedOperation(data.get('traceback', ''))
if data.get('no_backend'):
raise BackendUnavailable(data.get('traceback', ''))
if data.get('backend_invalid'):
raise BackendInvalid(
backend_name=self.build_backend,
backend_path=self.backend_path,
message=data.get('backend_error', '')
)
if data.get('hook_missing'):
raise HookMissing(hook_name)
return data['return_val']
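# Hedged usage sketch (not part of the original module): it assumes a source
# tree at ./pkg whose pyproject.toml declares the setuptools backend.
#
#     hooks = Pep517HookCaller('pkg', build_backend='setuptools.build_meta')
#     with hooks.subprocess_runner(quiet_subprocess_runner):
#         requires = hooks.get_requires_for_build_wheel()
#         wheel_name = hooks.build_wheel('dist')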
class LoggerWrapper(threading.Thread):
"""
Read messages from a pipe and redirect them
to a logger (see python's logging module).
"""
def __init__(self, logger, level):
threading.Thread.__init__(self)
self.daemon = True
self.logger = logger
self.level = level
# create the pipe and reader
self.fd_read, self.fd_write = os.pipe()
self.reader = os.fdopen(self.fd_read)
self.start()
def fileno(self):
return self.fd_write
@staticmethod
def remove_newline(msg):
return msg[:-1] if msg.endswith(os.linesep) else msg
def run(self):
for line in self.reader:
self._write(self.remove_newline(line))
def _write(self, message):
self.logger.log(self.level, message)
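# Illustrative sketch only (assumes the standard logging module is configured):
# because LoggerWrapper exposes fileno(), it can stand in for a writable file
# descriptor, e.g. as the stdout/stderr of a subprocess, so that the child's
# output is redirected to a logger.
#
#     import logging, subprocess
#     pipe = LoggerWrapper(logging.getLogger('build'), logging.INFO)
#     subprocess.check_call(['echo', 'hello'], stdout=pipe, stderr=pipe)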
|
the-stack_106_21486
|
import os
import sys
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import imagelib.filter as filt
import imagelib.utility as util
if __name__ == '__main__':
LOAD_IMG_PATH = '/home/uros/Desktop/workspace/grid.jpg'
MAIN_EDGES_PATH = '/home/uros/Desktop/workspace/grid_thin_edge.jpg'
CANNY_PATH = '/home/uros/Desktop/workspace/grid_canny.jpg'
HOUGH_PATH = '/home/uros/Desktop/workspace/grid_hough.jpg'
DBG_PATH = '/home/uros/Desktop/workspace/grid_dbg.jpg'
SOBEL_EDGE = '/home/uros/Desktop/workspace/grid_sobel.jpg'
img = util.imopen(LOAD_IMG_PATH)
rgb_img = img[:, :, :]
img = util.rgb2gray(img)
edge_img, edge_dir = filt.sobel_edge_det(img, (3, 3))
util.imwrite(SOBEL_EDGE, edge_img)
main_edge_img = filt.non_max_supression(edge_img, edge_dir)
util.imwrite(MAIN_EDGES_PATH, main_edge_img)
high = 60
low = 20
canny_edges_image = filt.dual_threshold(main_edge_img, high, low)
canny_edges_image = canny_edges_image[5:-5, 5:-5]
util.imwrite(CANNY_PATH, canny_edges_image)
hough_edges, hough_img = filt.hough_transform(canny_edges_image, 100, 100, 100)
util.imwrite(DBG_PATH, hough_img)
rgb_img = rgb_img[5:-5, 5:-5]
for x in range(hough_img.shape[0]):
for y in range(hough_img.shape[1]):
if hough_img[x, y] == 1:
rgb_img[x, y, 1] = 255.
util.imwrite(HOUGH_PATH, rgb_img)
|
the-stack_106_21487
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from copy import deepcopy
import numpy as np
from graph.types import PoolingParameters
from graph.types.pooling import MaxPoolParameters
from quantization.new_qrec import QRec
from quantization.qtype import QType
from quantization.unified_quantization_handler import (in_qs_constraint,
options,
out_qs_constraint,
params_type, priority)
from ..mult_quantization_handler import MultQuantizionHandler
LOG = logging.getLogger('nntool.' + __name__)
AT_SW_KER_IN_ORDER = [['c', 'h', 'w']]
AT_SW_KER_OUT_ORDER = [['c', 'h', 'w']]
AT_NE16_KER_IN_ORDER = [['h', 'w', 'c']]
AT_NE16_KER_OUT_ORDER = [['h', 'w', 'c']]
@params_type(PoolingParameters)
@in_qs_constraint({'dtype': set([np.int8])})
@out_qs_constraint({'dtype': set([np.int8])})
class PoolingMult(MultQuantizionHandler):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
# copy in_qs because we may modify it
in_qs = in_qs.copy()
opts = kwargs['opts']
force_out_qs, out_dtype = cls.get_mult_opts(**kwargs)
force_out_q = force_out_qs and force_out_qs[0]
G = kwargs['G']
in_q = in_qs[0]
if (in_q.is_asymmetric and isinstance(params, PoolingParameters) and params.padding.has_padding):
in_qs = cls.force_symmetric(in_qs)
if in_qs is None:
return None
in_q = in_qs[0]
cls.check_valid_ranges(params, stats, idx=0, dirs='in')
min_val = stats['range_in'][0]['min']
max_val = stats['range_in'][0]['max']
if force_out_q:
if force_out_q.is_asymmetric and not opts.get('allow_asymmetric'):
LOG.warning('%s could be asymmetrically quantized but allow_asymmetric option not selected', params.name)
return None
o_q = force_out_q
in_q = deepcopy(force_out_q)
if force_out_q.dtype != in_q.dtype or force_out_q.zero_point != in_q.zero_point:
if in_q.forced and force_out_q.zero_point != 0:
return None
LOG.warning('node %s output forced to range %s/%s %s - actual range %s/%s',
params.name, o_q.min, o_q.max, "asymmetric" if o_q.is_asymmetric else "symmetric",
min_val, max_val)
else:
o_q = deepcopy(in_q)
cls.check_order(params, AT_SW_KER_IN_ORDER, AT_SW_KER_OUT_ORDER)
return QRec.scaled(in_qs=[in_q],
out_qs=[o_q])
@classmethod
def can_handle_asymmetric_input(cls, params, **kwargs):
return not isinstance(params, PoolingParameters) or not params.padding.has_padding
@classmethod
def _get_in_qs_from_stats(cls, params, stats, in_qs, **kwargs):
return [QType.from_min_max_sq(stats['range_in'][idx]['min'],
stats['range_in'][idx]['max'],
dtype=np.int8,
asymmetric=cls.can_handle_asymmetric_input(params, **kwargs) and in_qs[idx].is_asymmetric)
if dim is not None else None
for idx, dim in enumerate(params.in_dims)]
@options(
{
'name': 'use_ne16',
'type': bool,
'help': 'enable use of NE16 kernels (if supported) on this layer',
'default': False
},
{
'name': 'force_ne16',
'type': bool,
'help': 'force use of NE16 kernels on this layer - may not be supported for model generation',
'default': False
}
)
@params_type(PoolingParameters)
@in_qs_constraint({'dtype': set([np.uint8])})
@out_qs_constraint({'dtype': set([np.uint8])})
@priority(2)
class NE16PoolingMult(MultQuantizionHandler):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
# copy in_qs because we may modify it
in_qs = in_qs.copy()
opts = kwargs['opts']
force_out_qs, out_dtype = cls.get_mult_opts(**kwargs)
force_out_q = force_out_qs and force_out_qs[0]
G = kwargs['G']
in_q = in_qs[0]
cls.check_valid_ranges(params, stats, idx=0, dirs='in')
min_val = stats['range_in'][0]['min']
max_val = stats['range_in'][0]['max']
if force_out_q:
# get rid of the force out if ne16 is not selected.
if not (opts.get('use_ne16') or opts.get('force_ne16')):
LOG.info(
'%s ne16 max pool possible but ne16 mode not enabled', params.name)
return None
o_q = force_out_q
if in_q.forced and in_q.zero_point != o_q.zero_point:
return None
in_q = deepcopy(o_q)
LOG.warning('node %s output forced to range %s/%s - actual range %s/%s %s',
params.name, o_q.min, o_q.max, min_val, max_val,
"asymmetric" if o_q.is_asymmetric else "symmetric")
else:
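            # no forced output: propagate the input quantization and tag the qrec for the NE16 accelerator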
o_q = deepcopy(in_q)
o_q.attr.ne16 = True
cls.check_order(params, AT_NE16_KER_IN_ORDER, AT_NE16_KER_OUT_ORDER)
return QRec.scaled(in_qs=[in_q],
out_qs=[o_q],
ne16=True)
@classmethod
def _get_in_qs_from_stats(cls, params, stats, in_qs, **kwargs):
return [QType.from_min_max_sq(stats['range_in'][idx]['min'],
stats['range_in'][idx]['max'],
dtype=np.uint8,
asymmetric=cls.can_handle_asymmetric_input(params, **kwargs) and in_qs[idx].is_asymmetric)
if dim is not None else None
for idx, dim in enumerate(params.in_dims)]
@classmethod
def can_handle_asymmetric_input(cls, params, **kwargs):
return not isinstance(params, PoolingParameters) or not params.padding.has_padding
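# --- Illustration only, not part of nntool: a minimal sketch of the symmetric
# --- int8 scaling that QType.from_min_max_sq is assumed to perform above.
# --- The helper below is hypothetical and exists purely for explanation.
def _symmetric_int8_sketch(min_val, max_val):
    # one scale covering the widest absolute value, zero point fixed at 0
    scale = max(abs(min_val), abs(max_val)) / 127.0
    def quantize(x):
        return np.clip(np.round(np.asarray(x) / scale), -128, 127).astype(np.int8)
    def dequantize(q):
        return np.asarray(q, dtype=np.float32) * scale
    return scale, quantize, dequantize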
|
the-stack_106_21488
|
from utils import *
class Smoothing():
def __init__(self):
pass
# add min max to the image
def scale(self,img):
studyArea = img.geometry().bounds()
maxs = img.reduceRegion(reducer=ee.Reducer.percentile([99.99]),geometry=studyArea,scale=300,maxPixels=1e13)
mins = img.reduceRegion(reducer=ee.Reducer.percentile([0.01]),geometry=studyArea,scale=300,maxPixels=1e13)
return img.set("max",ee.Number(maxs.get("Mode"))).set("min",ee.Number(mins.get("Mode")))
# scale the image
def scaleImg(self,img):
maxs = ee.Number(img.get("max"))
mins = ee.Number(img.get("min"))
result = ee.Image(img.subtract(mins)).divide(maxs.subtract(mins)).multiply(100)
        return result.min(100).set('time_start',img.get('system:time_start'))
    # Function to compute the inverse log ratio of regression results to
# transform back to percent units
def inverseLogRatio(self,image):
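        # maps a log-ratio value y back to percent units: p = 100 / (1 + exp(y))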
bands = image.bandNames()
ilrImage = ee.Image(100).divide(ee.Image(1).add(image.exp())).rename(bands)
return ilrImage
def whittakerSmoothing(self,imageCollection, isCompositional = False, lamb = 5):
# quick configs to set defaults
def toFl(image):
return image.toFloat()
# procedure start
ic = imageCollection.map(toFl)
dimension = ic.size()
identity_mat = ee.Array.identity(dimension)
difference_mat = getDifferenceMatrix(identity_mat,3)
difference_mat_transpose = difference_mat.transpose()
lamda_difference_mat = difference_mat_transpose.multiply(lamb)
res_mat = lamda_difference_mat.matrixMultiply(difference_mat)
hat_matrix = res_mat.add(identity_mat)
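        # Whittaker-Eilers smoother: the fitted series z solves
        # (I + lamb * D'D) z = y, where D is the third-order difference
        # matrix built above; a larger lamb yields a smoother fit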
# backing up original data
original = ic
def getProperties(image):
return ee.Image(image).toDictionary()
# get original image properties
properties = ic.toList(10000).map(getProperties)
# if data is compositional
# calculate the logratio of an image between 0 and 100. First
# clamps between delta and 100-delta, where delta is a small positive value.
if (isCompositional):
def clampImage(image):
delta = 0.001
bands = image.bandNames()
image = image.clamp(delta,100-delta)
image = (ee.Image.constant(100).subtract(image)).divide(image).log().rename(bands)
return image
ic = ic.map(clampImage)
arrayImage = original.toArray()
coeffimage = ee.Image(hat_matrix)
smoothImage = coeffimage.matrixSolve(arrayImage)
def getImageId(image):
return ee.Image(image).id()
idlist = ic.toList(10000).map(getImageId)
bandlist = ee.Image(ic.first()).bandNames()
flatImage = smoothImage.arrayFlatten([idlist,bandlist])
smoothCollection = ee.ImageCollection(unpack(flatImage, idlist, bandlist))
if (isCompositional):
            smoothCollection = smoothCollection.map(self.inverseLogRatio)
def addSuffix(band):
return ee.String(band).cat('_fitted')
# get new band names by adding suffix fitted
newBandNames = bandlist.map(addSuffix)
# rename the bands in smoothened images
smoothCollection = smoothCollection.select(bandlist, newBandNames)
        # a really dumb way to lose the google earth engine generated ID so that the two
# images can be combined for the chart
dumbimg = arrayImage.arrayFlatten([idlist,bandlist])
dumbcoll = ee.ImageCollection(unpack(dumbimg,idlist, bandlist))
outCollection = dumbcoll.combine(smoothCollection)
outCollList = outCollection.toList(10000)
def addPropBack(image):
return ee.Image(image).set(properties.get(outCollList.indexOf(image)))
outCollectionProp = outCollList.map(addPropBack)
residue_sq = smoothImage.subtract(arrayImage).pow(ee.Image(2)).divide(dimension)
rmse_array = residue_sq.arrayReduce(ee.Reducer.sum(),[0]).pow(ee.Image(1/2))
rmseImage = rmse_array.arrayFlatten([["rmse"],bandlist])
return (ee.ImageCollection(outCollectionProp), rmseImage)
def setTime(self,img):
return img.set("system:time_start",img.get("time_start"))
def whittakerSmoothen(collection):
smooth = Smoothing()
collection = collection.map(smooth.scale)
collection = collection.map(smooth.scaleImg)
    smoothingResults, rmse = smooth.whittakerSmoothing(collection)
smoothingResults = smoothingResults.map(smooth.setTime)
return ee.ImageCollection(smoothingResults.select(["Mode_fitted"])), ee.Image(rmse)
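# --- Illustration only, not used by this module: the same Whittaker fit in
# --- plain NumPy for a single 1-D series, mirroring the (I + lamb * D'D) z = y
# --- solve performed above per pixel. Name and defaults are illustrative.
def whittaker_numpy_sketch(y, lamb=5, d=3):
    import numpy as np
    y = np.asarray(y, dtype=float)
    D = np.diff(np.eye(len(y)), n=d, axis=0)   # d-th order difference matrix
    A = np.eye(len(y)) + lamb * (D.T @ D)      # hat matrix (I + lamb * D'D)
    return np.linalg.solve(A, y)               # smoothed series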
|
the-stack_106_21489
|
# -*- coding: utf-8 -*-
"""The ants module provides basic functions for interfacing with ants functions.
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import range, str
import os
from ...external.due import BibTeX
from ...utils.filemanip import split_filename, copyfile, which
from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined
from .base import ANTSCommand, ANTSCommandInputSpec
class AtroposInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3,
2,
4,
argstr='--image-dimensionality %d',
usedefault=True,
desc='image dimension (2, 3, or 4)')
intensity_images = InputMultiPath(
File(exists=True), argstr="--intensity-image %s...", mandatory=True)
mask_image = File(exists=True, argstr='--mask-image %s', mandatory=True)
initialization = traits.Enum(
'Random',
'Otsu',
'KMeans',
'PriorProbabilityImages',
'PriorLabelImage',
argstr="%s",
requires=['number_of_tissue_classes'],
mandatory=True)
prior_probability_images = InputMultiPath(File(exists=True))
number_of_tissue_classes = traits.Int(mandatory=True)
prior_weighting = traits.Float()
prior_probability_threshold = traits.Float(requires=['prior_weighting'])
likelihood_model = traits.Str(argstr="--likelihood-model %s")
mrf_smoothing_factor = traits.Float(argstr="%s")
mrf_radius = traits.List(traits.Int(), requires=['mrf_smoothing_factor'])
icm_use_synchronous_update = traits.Bool(argstr="%s")
maximum_number_of_icm_terations = traits.Int(
requires=['icm_use_synchronous_update'])
n_iterations = traits.Int(argstr="%s")
convergence_threshold = traits.Float(requires=['n_iterations'])
posterior_formulation = traits.Str(argstr="%s")
use_random_seed = traits.Bool(
True,
argstr='--use-random-seed %d',
desc='use random seed value over constant',
usedefault=True)
use_mixture_model_proportions = traits.Bool(
requires=['posterior_formulation'])
out_classified_image_name = File(
argstr="%s", genfile=True, hash_files=False)
save_posteriors = traits.Bool()
output_posteriors_name_template = traits.Str(
'POSTERIOR_%02d.nii.gz', usedefault=True)
class AtroposOutputSpec(TraitedSpec):
classified_image = File(exists=True)
    posteriors = OutputMultiPath(File(exists=True))
class Atropos(ANTSCommand):
"""A finite mixture modeling (FMM) segmentation approach with possibilities for
specifying prior constraints. These prior constraints include the specification
of a prior label image, prior probability images (one for each class), and/or an
MRF prior to enforce spatial smoothing of the labels. Similar algorithms include
FAST and SPM.
Examples
--------
>>> from nipype.interfaces.ants import Atropos
>>> at = Atropos()
>>> at.inputs.dimension = 3
>>> at.inputs.intensity_images = 'structural.nii'
>>> at.inputs.mask_image = 'mask.nii'
>>> at.inputs.initialization = 'PriorProbabilityImages'
>>> at.inputs.prior_probability_images = ['rc1s1.nii', 'rc1s2.nii']
>>> at.inputs.number_of_tissue_classes = 2
>>> at.inputs.prior_weighting = 0.8
>>> at.inputs.prior_probability_threshold = 0.0000001
>>> at.inputs.likelihood_model = 'Gaussian'
>>> at.inputs.mrf_smoothing_factor = 0.2
>>> at.inputs.mrf_radius = [1, 1, 1]
>>> at.inputs.icm_use_synchronous_update = True
>>> at.inputs.maximum_number_of_icm_terations = 1
>>> at.inputs.n_iterations = 5
>>> at.inputs.convergence_threshold = 0.000001
>>> at.inputs.posterior_formulation = 'Socrates'
>>> at.inputs.use_mixture_model_proportions = True
>>> at.inputs.save_posteriors = True
>>> at.cmdline
'Atropos --image-dimensionality 3 --icm [1,1] \
--initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07] --intensity-image structural.nii \
--likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \
--output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] --use-random-seed 1'
"""
input_spec = AtroposInputSpec
output_spec = AtroposOutputSpec
_cmd = 'Atropos'
def _format_arg(self, opt, spec, val):
if opt == 'initialization':
retval = "--initialization %s[%d" % (
val, self.inputs.number_of_tissue_classes)
if val == "PriorProbabilityImages":
_, _, ext = split_filename(
self.inputs.prior_probability_images[0])
retval += ",priors/priorProbImages%02d" + \
ext + ",%g" % self.inputs.prior_weighting
if isdefined(self.inputs.prior_probability_threshold):
retval += ",%g" % self.inputs.prior_probability_threshold
return retval + "]"
if opt == 'mrf_smoothing_factor':
retval = "--mrf [%g" % val
if isdefined(self.inputs.mrf_radius):
retval += ",%s" % self._format_xarray(
[str(s) for s in self.inputs.mrf_radius])
return retval + "]"
if opt == "icm_use_synchronous_update":
retval = "--icm [%d" % val
if isdefined(self.inputs.maximum_number_of_icm_terations):
retval += ",%g" % self.inputs.maximum_number_of_icm_terations
return retval + "]"
if opt == "n_iterations":
retval = "--convergence [%d" % val
if isdefined(self.inputs.convergence_threshold):
retval += ",%g" % self.inputs.convergence_threshold
return retval + "]"
if opt == "posterior_formulation":
retval = "--posterior-formulation %s" % val
if isdefined(self.inputs.use_mixture_model_proportions):
retval += "[%d]" % self.inputs.use_mixture_model_proportions
return retval
if opt == "out_classified_image_name":
retval = "--output [%s" % val
if isdefined(self.inputs.save_posteriors):
retval += ",%s" % self.inputs.output_posteriors_name_template
return retval + "]"
return super(Atropos, self)._format_arg(opt, spec, val)
def _run_interface(self, runtime, correct_return_codes=[0]):
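        # stage prior probability images under ./priors with the file names referenced by --initialization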
if self.inputs.initialization == "PriorProbabilityImages":
priors_directory = os.path.join(os.getcwd(), "priors")
if not os.path.exists(priors_directory):
os.makedirs(priors_directory)
_, _, ext = split_filename(self.inputs.prior_probability_images[0])
for i, f in enumerate(self.inputs.prior_probability_images):
target = os.path.join(priors_directory,
'priorProbImages%02d' % (i + 1) + ext)
if not (os.path.exists(target)
and os.path.realpath(target) == os.path.abspath(f)):
copyfile(
os.path.abspath(f),
os.path.join(priors_directory,
'priorProbImages%02d' % (i + 1) + ext))
runtime = super(Atropos, self)._run_interface(runtime)
return runtime
def _gen_filename(self, name):
if name == 'out_classified_image_name':
output = self.inputs.out_classified_image_name
if not isdefined(output):
_, name, ext = split_filename(self.inputs.intensity_images[0])
output = name + '_labeled' + ext
return output
return None
def _list_outputs(self):
outputs = self._outputs().get()
outputs['classified_image'] = os.path.abspath(
self._gen_filename('out_classified_image_name'))
if isdefined(
self.inputs.save_posteriors) and self.inputs.save_posteriors:
outputs['posteriors'] = []
for i in range(self.inputs.number_of_tissue_classes):
outputs['posteriors'].append(
os.path.abspath(
self.inputs.output_posteriors_name_template % (i + 1)))
return outputs
class LaplacianThicknessInputSpec(ANTSCommandInputSpec):
input_wm = File(
argstr='%s',
mandatory=True,
copyfile=True,
desc='white matter segmentation image',
position=1)
input_gm = File(
argstr='%s',
mandatory=True,
copyfile=True,
desc='gray matter segmentation image',
position=2)
output_image = File(
desc='name of output file',
argstr='%s',
position=3,
name_source=['input_wm'],
name_template='%s_thickness',
keep_extension=True,
hash_files=False)
smooth_param = traits.Float(argstr='smoothparam=%d', desc='', position=4)
prior_thickness = traits.Float(
argstr='priorthickval=%d', desc='', position=5)
dT = traits.Float(argstr='dT=%d', desc='', position=6)
sulcus_prior = traits.Bool(argstr='use-sulcus-prior', desc='', position=7)
opt_tolerance = traits.Float(
argstr='optional-laplacian-tolerance=%d', desc='', position=8)
class LaplacianThicknessOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Cortical thickness')
class LaplacianThickness(ANTSCommand):
"""Calculates the cortical thickness from an anatomical image
Examples
--------
>>> from nipype.interfaces.ants import LaplacianThickness
>>> cort_thick = LaplacianThickness()
>>> cort_thick.inputs.input_wm = 'white_matter.nii.gz'
>>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz'
>>> cort_thick.cmdline
'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz white_matter_thickness.nii.gz'
>>> cort_thick.inputs.output_image = 'output_thickness.nii.gz'
>>> cort_thick.cmdline
'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz'
"""
_cmd = 'LaplacianThickness'
input_spec = LaplacianThicknessInputSpec
output_spec = LaplacianThicknessOutputSpec
class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3,
2,
4,
argstr='-d %d',
usedefault=True,
desc='image dimension (2, 3 or 4)')
input_image = File(
argstr='--input-image %s',
mandatory=True,
desc=('input for bias correction. Negative values or values close to '
'zero should be processed prior to correction'))
mask_image = File(
argstr='--mask-image %s',
desc=('image to specify region to perform final bias correction in'))
weight_image = File(
argstr='--weight-image %s',
desc=('image for relative weighting (e.g. probability map of the white '
'matter) of voxels during the B-spline fitting. '))
output_image = traits.Str(
argstr='--output %s',
desc='output file name',
genfile=True,
hash_files=False)
bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s")
bspline_order = traits.Int(requires=['bspline_fitting_distance'])
shrink_factor = traits.Int(argstr="--shrink-factor %d")
n_iterations = traits.List(traits.Int(), argstr="--convergence %s")
convergence_threshold = traits.Float(requires=['n_iterations'])
save_bias = traits.Bool(
False,
mandatory=True,
usedefault=True,
desc=('True if the estimated bias should be saved to file.'),
xor=['bias_image'])
bias_image = File(
desc='Filename for the estimated bias.', hash_files=False)
copy_header = traits.Bool(
False,
mandatory=True,
usedefault=True,
desc='copy headers of the original image into the '
'output (corrected) file')
class N4BiasFieldCorrectionOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
bias_image = File(exists=True, desc='Estimated bias')
class N4BiasFieldCorrection(ANTSCommand):
"""N4 is a variant of the popular N3 (nonparameteric nonuniform normalization)
retrospective bias correction algorithm. Based on the assumption that the
corruption of the low frequency bias field can be modeled as a convolution of
the intensity histogram by a Gaussian, the basic algorithmic protocol is to
iterate between deconvolving the intensity histogram by a Gaussian, remapping
the intensities, and then spatially smoothing this result by a B-spline modeling
of the bias field itself. The modifications from and improvements obtained over
the original N3 algorithm are described in [Tustison2010]_.
.. [Tustison2010] N. Tustison et al.,
N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
29(6):1310-1320, June 2010.
Examples
--------
>>> import copy
>>> from nipype.interfaces.ants import N4BiasFieldCorrection
>>> n4 = N4BiasFieldCorrection()
>>> n4.inputs.dimension = 3
>>> n4.inputs.input_image = 'structural.nii'
>>> n4.inputs.bspline_fitting_distance = 300
>>> n4.inputs.shrink_factor = 3
>>> n4.inputs.n_iterations = [50,50,30,20]
>>> n4.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
-d 3 --input-image structural.nii \
--convergence [ 50x50x30x20 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_2 = copy.deepcopy(n4)
>>> n4_2.inputs.convergence_threshold = 1e-6
>>> n4_2.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
-d 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_3 = copy.deepcopy(n4_2)
>>> n4_3.inputs.bspline_order = 5
>>> n4_3.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \
-d 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_4 = N4BiasFieldCorrection()
>>> n4_4.inputs.input_image = 'structural.nii'
>>> n4_4.inputs.save_bias = True
>>> n4_4.inputs.dimension = 3
>>> n4_4.cmdline
'N4BiasFieldCorrection -d 3 --input-image structural.nii \
--output [ structural_corrected.nii, structural_bias.nii ]'
"""
_cmd = 'N4BiasFieldCorrection'
input_spec = N4BiasFieldCorrectionInputSpec
output_spec = N4BiasFieldCorrectionOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
output = self.inputs.output_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + '_corrected' + ext
return output
if name == 'bias_image':
output = self.inputs.bias_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + '_bias' + ext
return output
return None
def _format_arg(self, name, trait_spec, value):
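        # when a bias image is requested, --output takes the bracketed form [ corrected, bias ]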
if ((name == 'output_image') and
(self.inputs.save_bias or isdefined(self.inputs.bias_image))):
bias_image = self._gen_filename('bias_image')
output = self._gen_filename('output_image')
newval = '[ %s, %s ]' % (output, bias_image)
return trait_spec.argstr % newval
if name == 'bspline_fitting_distance':
if isdefined(self.inputs.bspline_order):
newval = '[ %g, %d ]' % (value, self.inputs.bspline_order)
else:
newval = '[ %g ]' % value
return trait_spec.argstr % newval
if name == 'n_iterations':
if isdefined(self.inputs.convergence_threshold):
newval = '[ %s, %g ]' % (
self._format_xarray([str(elt) for elt in value]),
self.inputs.convergence_threshold)
else:
newval = '[ %s ]' % self._format_xarray(
[str(elt) for elt in value])
return trait_spec.argstr % newval
return super(N4BiasFieldCorrection, self)._format_arg(
name, trait_spec, value)
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
skip += ['save_bias', 'bias_image']
return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.abspath(
self._gen_filename('output_image'))
if self.inputs.save_bias or isdefined(self.inputs.bias_image):
outputs['bias_image'] = os.path.abspath(
self._gen_filename('bias_image'))
return outputs
def _run_interface(self, runtime, correct_return_codes=(0, )):
runtime = super(N4BiasFieldCorrection, self)._run_interface(
runtime, correct_return_codes)
if self.inputs.copy_header and runtime.returncode in correct_return_codes:
self._copy_header(self._gen_filename('output_image'))
if self.inputs.save_bias or isdefined(self.inputs.bias_image):
self._copy_header(self._gen_filename('bias_image'))
return runtime
def _copy_header(self, fname):
"""Copy header from input image to an output image"""
import nibabel as nb
in_img = nb.load(self.inputs.input_image)
out_img = nb.load(fname, mmap=False)
new_img = out_img.__class__(out_img.get_data(), in_img.affine,
in_img.header)
new_img.set_data_dtype(out_img.get_data_dtype())
new_img.to_filename(fname)
class CorticalThicknessInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)')
anatomical_image = File(
exists=True,
argstr='-a %s',
desc=('Structural *intensity* image, typically T1.'
' If more than one anatomical image is specified,'
' subsequently specified images are used during the'
' segmentation process. However, only the first'
' image is used in the registration of priors.'
' Our suggestion would be to specify the T1'
' as the first image.'),
mandatory=True)
brain_template = File(
exists=True,
argstr='-e %s',
desc=('Anatomical *intensity* template (possibly created using a'
' population data set with buildtemplateparallel.sh in ANTs).'
' This template is *not* skull-stripped.'),
mandatory=True)
brain_probability_mask = File(
exists=True,
argstr='-m %s',
desc='brain probability mask in template space',
copyfile=False,
mandatory=True)
segmentation_priors = InputMultiPath(
File(exists=True), argstr='-p %s', mandatory=True)
out_prefix = traits.Str(
'antsCT_',
argstr='-o %s',
usedefault=True,
desc=('Prefix that is prepended to all output'
' files (default = antsCT_)'))
image_suffix = traits.Str(
'nii.gz',
desc=('any of standard ITK formats,'
' nii.gz is default'),
argstr='-s %s',
usedefault=True)
t1_registration_template = File(
exists=True,
desc=('Anatomical *intensity* template'
' (assumed to be skull-stripped). A common'
' case would be where this would be the same'
' template as specified in the -e option which'
' is not skull stripped.'),
argstr='-t %s',
mandatory=True)
extraction_registration_mask = File(
exists=True,
argstr='-f %s',
desc=('Mask (defined in the template space) used during'
' registration for brain extraction.'))
keep_temporary_files = traits.Int(
argstr='-k %d',
desc='Keep brain extraction/segmentation warps, etc (default = 0).')
max_iterations = traits.Int(
argstr='-i %d',
desc=('ANTS registration max iterations (default = 100x100x70x20)'))
prior_segmentation_weight = traits.Float(
argstr='-w %f',
desc=('Atropos spatial prior *probability* weight for'
' the segmentation'))
segmentation_iterations = traits.Int(
argstr='-n %d',
desc=('N4 -> Atropos -> N4 iterations during segmentation'
' (default = 3)'))
posterior_formulation = traits.Str(
argstr='-b %s',
desc=('Atropos posterior formulation and whether or not'
' to use mixture model proportions.'
''' e.g 'Socrates[1]' (default) or 'Aristotle[1]'.'''
' Choose the latter if you'
' want use the distance priors (see also the -l option'
' for label propagation control).'))
use_floatingpoint_precision = traits.Enum(
0,
1,
argstr='-j %d',
desc=('Use floating point precision in registrations (default = 0)'))
use_random_seeding = traits.Enum(
0,
1,
argstr='-u %d',
desc=('Use random number generated from system clock in Atropos'
' (default = 1)'))
b_spline_smoothing = traits.Bool(
argstr='-v',
desc=('Use B-spline SyN for registrations and B-spline'
' exponential mapping in DiReCT.'))
cortical_label_image = File(
exists=True, desc='Cortical ROI labels to use as a prior for ATITH.')
label_propagation = traits.Str(
argstr='-l %s',
desc=
        ('Incorporate a distance prior on the posterior formulation. Should be'
''' of the form 'label[lambda,boundaryProbability]' where label'''
' is a value of 1,2,3,... denoting label ID. The label'
' probability for anything outside the current label'
' = boundaryProbability * exp( -lambda * distanceFromBoundary )'
' Intuitively, smaller lambda values will increase the spatial capture'
' range of the distance prior. To apply to all label values, simply omit'
' specifying the label, i.e. -l [lambda,boundaryProbability].'))
quick_registration = traits.Bool(
argstr='-q 1',
desc=
('If = 1, use antsRegistrationSyNQuick.sh as the basis for registration'
' during brain extraction, brain segmentation, and'
' (optional) normalization to a template.'
' Otherwise use antsRegistrationSyN.sh (default = 0).'))
debug = traits.Bool(
argstr='-z 1',
desc=(
'If > 0, runs a faster version of the script.'
' Only for testing. Implies -u 0.'
' Requires single thread computation for complete reproducibility.'
))
class CorticalThicknessOutputSpec(TraitedSpec):
BrainExtractionMask = File(exists=True, desc='brain extraction mask')
    BrainSegmentation = File(exists=True, desc='brain segmentation image')
BrainSegmentationN4 = File(exists=True, desc='N4 corrected image')
BrainSegmentationPosteriors = OutputMultiPath(
File(exists=True), desc='Posterior probability images')
CorticalThickness = File(exists=True, desc='cortical thickness file')
TemplateToSubject1GenericAffine = File(
exists=True, desc='Template to subject affine')
TemplateToSubject0Warp = File(exists=True, desc='Template to subject warp')
SubjectToTemplate1Warp = File(
exists=True, desc='Template to subject inverse warp')
SubjectToTemplate0GenericAffine = File(
exists=True, desc='Template to subject inverse affine')
SubjectToTemplateLogJacobian = File(
exists=True, desc='Template to subject log jacobian')
CorticalThicknessNormedToTemplate = File(
exists=True, desc='Normalized cortical thickness')
BrainVolumes = File(exists=True, desc='Brain volumes as text')
class CorticalThickness(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants.segmentation import CorticalThickness
>>> corticalthickness = CorticalThickness()
>>> corticalthickness.inputs.dimension = 3
>>> corticalthickness.inputs.anatomical_image ='T1.nii.gz'
>>> corticalthickness.inputs.brain_template = 'study_template.nii.gz'
>>> corticalthickness.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
>>> corticalthickness.inputs.segmentation_priors = ['BrainSegmentationPrior01.nii.gz',
... 'BrainSegmentationPrior02.nii.gz',
... 'BrainSegmentationPrior03.nii.gz',
... 'BrainSegmentationPrior04.nii.gz']
>>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz'
>>> corticalthickness.cmdline
'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \
-s nii.gz -o antsCT_ -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz'
"""
input_spec = CorticalThicknessInputSpec
output_spec = CorticalThicknessOutputSpec
_cmd = 'antsCorticalThickness.sh'
def _format_arg(self, opt, spec, val):
if opt == 'anatomical_image':
retval = '-a %s' % val
return retval
if opt == 'brain_template':
retval = '-e %s' % val
return retval
if opt == 'brain_probability_mask':
retval = '-m %s' % val
return retval
if opt == 'out_prefix':
retval = '-o %s' % val
return retval
if opt == 't1_registration_template':
retval = '-t %s' % val
return retval
if opt == 'segmentation_priors':
_, _, ext = split_filename(self.inputs.segmentation_priors[0])
retval = "-p nipype_priors/BrainSegmentationPrior%02d" + ext
return retval
return super(CorticalThickness, self)._format_arg(opt, spec, val)
def _run_interface(self, runtime, correct_return_codes=[0]):
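        # stage segmentation priors under ./nipype_priors with the names referenced by the -p argument template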
priors_directory = os.path.join(os.getcwd(), "nipype_priors")
if not os.path.exists(priors_directory):
os.makedirs(priors_directory)
_, _, ext = split_filename(self.inputs.segmentation_priors[0])
for i, f in enumerate(self.inputs.segmentation_priors):
target = os.path.join(priors_directory,
'BrainSegmentationPrior%02d' % (i + 1) + ext)
if not (os.path.exists(target)
and os.path.realpath(target) == os.path.abspath(f)):
copyfile(os.path.abspath(f), target)
runtime = super(CorticalThickness, self)._run_interface(runtime)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['BrainExtractionMask'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionMask.' +
self.inputs.image_suffix)
outputs['BrainSegmentation'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainSegmentation.' +
self.inputs.image_suffix)
outputs['BrainSegmentationN4'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainSegmentation0N4.' +
self.inputs.image_suffix)
posteriors = []
for i in range(len(self.inputs.segmentation_priors)):
posteriors.append(
os.path.join(os.getcwd(), self.inputs.out_prefix +
'BrainSegmentationPosteriors%02d.' %
(i + 1) + self.inputs.image_suffix))
outputs['BrainSegmentationPosteriors'] = posteriors
outputs['CorticalThickness'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'CorticalThickness.' +
self.inputs.image_suffix)
outputs['TemplateToSubject1GenericAffine'] = os.path.join(
os.getcwd(),
self.inputs.out_prefix + 'TemplateToSubject1GenericAffine.mat')
outputs['TemplateToSubject0Warp'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'TemplateToSubject0Warp.' +
self.inputs.image_suffix)
outputs['SubjectToTemplate1Warp'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'SubjectToTemplate1Warp.' +
self.inputs.image_suffix)
outputs['SubjectToTemplate0GenericAffine'] = os.path.join(
os.getcwd(),
self.inputs.out_prefix + 'SubjectToTemplate0GenericAffine.mat')
outputs['SubjectToTemplateLogJacobian'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'SubjectToTemplateLogJacobian.' + self.inputs.image_suffix)
outputs['CorticalThicknessNormedToTemplate'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'CorticalThickness.' +
self.inputs.image_suffix)
outputs['BrainVolumes'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'brainvols.csv')
return outputs
class BrainExtractionInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)')
anatomical_image = File(
exists=True,
argstr='-a %s',
desc=('Structural image, typically T1. If more than one'
' anatomical image is specified, subsequently specified'
' images are used during the segmentation process. However,'
' only the first image is used in the registration of priors.'
              ' Our suggestion would be to specify the T1 as the first image.'),
mandatory=True)
brain_template = File(
exists=True,
argstr='-e %s',
desc=('Anatomical template created using e.g. LPBA40 data set with'
' buildtemplateparallel.sh in ANTs.'),
mandatory=True)
brain_probability_mask = File(
exists=True,
argstr='-m %s',
desc=('Brain probability mask created using e.g. LPBA40 data set which'
' have brain masks defined, and warped to anatomical template and'
' averaged resulting in a probability image.'),
copyfile=False,
mandatory=True)
out_prefix = traits.Str(
'highres001_',
argstr='-o %s',
usedefault=True,
desc=('Prefix that is prepended to all output'
              ' files (default = highres001_)'))
extraction_registration_mask = File(
exists=True,
argstr='-f %s',
desc=('Mask (defined in the template space) used during'
' registration for brain extraction.'
' To limit the metric computation to a specific region.'))
image_suffix = traits.Str(
'nii.gz',
desc=('any of standard ITK formats,'
' nii.gz is default'),
argstr='-s %s',
usedefault=True)
use_random_seeding = traits.Enum(
0,
1,
argstr='-u %d',
desc=('Use random number generated from system clock in Atropos'
' (default = 1)'))
keep_temporary_files = traits.Int(
argstr='-k %d',
desc='Keep brain extraction/segmentation warps, etc (default = 0).')
use_floatingpoint_precision = traits.Enum(
0,
1,
argstr='-q %d',
desc=('Use floating point precision in registrations (default = 0)'))
debug = traits.Bool(
argstr='-z 1',
desc=(
'If > 0, runs a faster version of the script.'
' Only for testing. Implies -u 0.'
' Requires single thread computation for complete reproducibility.'
))
class BrainExtractionOutputSpec(TraitedSpec):
BrainExtractionMask = File(exists=True, desc='brain extraction mask')
BrainExtractionBrain = File(exists=True, desc='brain extraction image')
BrainExtractionCSF = File(
exists=True, desc='segmentation mask with only CSF')
BrainExtractionGM = File(
exists=True, desc='segmentation mask with only grey matter')
BrainExtractionInitialAffine = File(exists=True, desc='')
BrainExtractionInitialAffineFixed = File(exists=True, desc='')
BrainExtractionInitialAffineMoving = File(exists=True, desc='')
BrainExtractionLaplacian = File(exists=True, desc='')
BrainExtractionPrior0GenericAffine = File(exists=True, desc='')
BrainExtractionPrior1InverseWarp = File(exists=True, desc='')
BrainExtractionPrior1Warp = File(exists=True, desc='')
BrainExtractionPriorWarped = File(exists=True, desc='')
BrainExtractionSegmentation = File(
exists=True, desc='segmentation mask with CSF, GM, and WM')
BrainExtractionTemplateLaplacian = File(exists=True, desc='')
BrainExtractionTmp = File(exists=True, desc='')
BrainExtractionWM = File(
        exists=True, desc='segmentation mask with only white matter')
N4Corrected0 = File(exists=True, desc='N4 bias field corrected image')
N4Truncated0 = File(exists=True, desc='')
class BrainExtraction(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants.segmentation import BrainExtraction
>>> brainextraction = BrainExtraction()
>>> brainextraction.inputs.dimension = 3
>>> brainextraction.inputs.anatomical_image ='T1.nii.gz'
>>> brainextraction.inputs.brain_template = 'study_template.nii.gz'
>>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
>>> brainextraction.cmdline
'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \
-s nii.gz -o highres001_'
"""
input_spec = BrainExtractionInputSpec
output_spec = BrainExtractionOutputSpec
_cmd = 'antsBrainExtraction.sh'
def _run_interface(self, runtime, correct_return_codes=(0, )):
# antsBrainExtraction.sh requires ANTSPATH to be defined
out_environ = self._get_environ()
ants_path = out_environ.get('ANTSPATH', None) or os.getenv(
'ANTSPATH', None)
if ants_path is None:
# Check for antsRegistration, which is under bin/ (the $ANTSPATH) instead of
# checking for antsBrainExtraction.sh which is under script/
cmd_path = which('antsRegistration', env=runtime.environ)
if not cmd_path:
raise RuntimeError(
'The environment variable $ANTSPATH is not defined in host "%s", '
'and Nipype could not determine it automatically.' %
runtime.hostname)
ants_path = os.path.dirname(cmd_path)
self.inputs.environ.update({'ANTSPATH': ants_path})
runtime.environ.update({'ANTSPATH': ants_path})
runtime = super(BrainExtraction, self)._run_interface(runtime)
        # Still, double-check whether it failed to find N4
if 'we cant find' in runtime.stdout:
for line in runtime.stdout.split('\n'):
if line.strip().startswith('we cant find'):
tool = line.strip().replace('we cant find the',
'').split(' ')[0]
break
errmsg = (
'antsBrainExtraction.sh requires "%s" to be found in $ANTSPATH '
'($ANTSPATH="%s").') % (tool, ants_path)
if runtime.stderr is None:
runtime.stderr = errmsg
else:
runtime.stderr += '\n' + errmsg
runtime.returncode = 1
self.raise_exception(runtime)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['BrainExtractionMask'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionMask.' +
self.inputs.image_suffix)
outputs['BrainExtractionBrain'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionBrain.' +
self.inputs.image_suffix)
if isdefined(self.inputs.keep_temporary_files
) and self.inputs.keep_temporary_files != 0:
outputs['BrainExtractionCSF'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionCSF.' +
self.inputs.image_suffix)
outputs['BrainExtractionGM'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionGM.' +
self.inputs.image_suffix)
outputs['BrainExtractionInitialAffine'] = os.path.join(
os.getcwd(),
self.inputs.out_prefix + 'BrainExtractionInitialAffine.mat')
outputs['BrainExtractionInitialAffineFixed'] = os.path.join(
os.getcwd(),
self.inputs.out_prefix + 'BrainExtractionInitialAffineFixed.' +
self.inputs.image_suffix)
outputs['BrainExtractionInitialAffineMoving'] = os.path.join(
os.getcwd(),
self.inputs.out_prefix + 'BrainExtractionInitialAffineMoving.'
+ self.inputs.image_suffix)
outputs['BrainExtractionLaplacian'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionLaplacian.' + self.inputs.image_suffix)
outputs['BrainExtractionPrior0GenericAffine'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionPrior0GenericAffine.mat')
outputs['BrainExtractionPrior1InverseWarp'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionPrior1InverseWarp.' + self.inputs.image_suffix)
outputs['BrainExtractionPrior1Warp'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionPrior1Warp.' + self.inputs.image_suffix)
outputs['BrainExtractionPriorWarped'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionPriorWarped.' + self.inputs.image_suffix)
outputs['BrainExtractionSegmentation'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionSegmentation.' + self.inputs.image_suffix)
outputs['BrainExtractionTemplateLaplacian'] = os.path.join(
os.getcwd(), self.inputs.out_prefix +
'BrainExtractionTemplateLaplacian.' + self.inputs.image_suffix)
outputs['BrainExtractionTmp'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionTmp.' +
self.inputs.image_suffix)
outputs['BrainExtractionWM'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'BrainExtractionWM.' +
self.inputs.image_suffix)
outputs['N4Corrected0'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'N4Corrected0.' +
self.inputs.image_suffix)
outputs['N4Truncated0'] = os.path.join(
os.getcwd(), self.inputs.out_prefix + 'N4Truncated0.' +
self.inputs.image_suffix)
return outputs
class JointFusionInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3,
2,
4,
argstr='%d',
position=0,
usedefault=True,
mandatory=True,
desc='image dimension (2, 3, or 4)')
modalities = traits.Int(
argstr='%d',
position=1,
mandatory=True,
desc='Number of modalities or features')
warped_intensity_images = InputMultiPath(
File(exists=True),
argstr="-g %s...",
mandatory=True,
desc='Warped atlas images')
target_image = InputMultiPath(
File(exists=True),
argstr='-tg %s...',
mandatory=True,
desc='Target image(s)')
warped_label_images = InputMultiPath(
File(exists=True),
argstr="-l %s...",
mandatory=True,
desc='Warped atlas segmentations')
method = traits.Str(
default='Joint',
argstr='-m %s',
usedefault=True,
desc=('Select voting method. Options: Joint (Joint'
' Label Fusion). May be followed by optional'
' parameters in brackets, e.g., -m Joint[0.1,2]'))
alpha = traits.Float(
default=0.1,
usedefault=True,
requires=['method'],
desc=('Regularization term added to matrix Mx for inverse'))
beta = traits.Int(
default=2,
usedefault=True,
requires=['method'],
desc=('Exponent for mapping intensity difference to joint error'))
output_label_image = File(
argstr='%s',
mandatory=True,
position=-1,
name_template='%s',
output_name='output_label_image',
desc='Output fusion label map image')
patch_radius = traits.ListInt(
minlen=3,
maxlen=3,
argstr='-rp %s',
desc=('Patch radius for similarity measures, '
'scalar or vector. Default: 2x2x2'))
search_radius = traits.ListInt(
minlen=3,
maxlen=3,
argstr='-rs %s',
desc='Local search radius. Default: 3x3x3')
exclusion_region = File(
exists=True,
argstr='-x %s',
desc=('Specify an exclusion region for the given label.'))
atlas_group_id = traits.ListInt(
argstr='-gp %d...', desc='Assign a group ID for each atlas')
atlas_group_weights = traits.ListInt(
argstr='-gpw %d...',
desc=('Assign the voting weights to each atlas group'))
class JointFusionOutputSpec(TraitedSpec):
output_label_image = File(exists=True)
# TODO: optional outputs - output_posteriors, output_voting_weights
class JointFusion(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import JointFusion
>>> at = JointFusion()
>>> at.inputs.dimension = 3
>>> at.inputs.modalities = 1
>>> at.inputs.method = 'Joint[0.1,2]'
>>> at.inputs.output_label_image ='fusion_labelimage_output.nii'
>>> at.inputs.warped_intensity_images = ['im1.nii',
... 'im2.nii',
... 'im3.nii']
>>> at.inputs.warped_label_images = ['segmentation0.nii.gz',
... 'segmentation1.nii.gz',
... 'segmentation1.nii.gz']
>>> at.inputs.target_image = 'T1.nii'
>>> at.cmdline
'jointfusion 3 1 -m Joint[0.1,2] -tg T1.nii -g im1.nii -g im2.nii -g im3.nii -l segmentation0.nii.gz \
-l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii'
>>> at.inputs.method = 'Joint'
>>> at.inputs.alpha = 0.5
>>> at.inputs.beta = 1
>>> at.inputs.patch_radius = [3,2,1]
>>> at.inputs.search_radius = [1,2,3]
>>> at.cmdline
'jointfusion 3 1 -m Joint[0.5,1] -rp 3x2x1 -rs 1x2x3 -tg T1.nii -g im1.nii -g im2.nii -g im3.nii \
-l segmentation0.nii.gz -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii'
"""
input_spec = JointFusionInputSpec
output_spec = JointFusionOutputSpec
_cmd = 'jointfusion'
def _format_arg(self, opt, spec, val):
if opt == 'method':
if '[' in val:
retval = '-m {0}'.format(val)
else:
retval = '-m {0}[{1},{2}]'.format(
self.inputs.method, self.inputs.alpha, self.inputs.beta)
elif opt == 'patch_radius':
retval = '-rp {0}'.format(self._format_xarray(val))
elif opt == 'search_radius':
retval = '-rs {0}'.format(self._format_xarray(val))
else:
if opt == 'warped_intensity_images':
assert len(val) == self.inputs.modalities * len(self.inputs.warped_label_images), \
"Number of intensity images and label maps must be the same {0}!={1}".format(
len(val), len(self.inputs.warped_label_images))
return super(JointFusion, self)._format_arg(opt, spec, val)
return retval
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_label_image'] = os.path.abspath(
self.inputs.output_label_image)
return outputs
class DenoiseImageInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
2,
3,
4,
argstr='-d %d',
desc='This option forces the image to be treated '
'as a specified-dimensional image. If not '
'specified, the program tries to infer the '
'dimensionality from the input image.')
input_image = File(
exists=True,
argstr="-i %s",
mandatory=True,
desc='A scalar image is expected as input for noise correction.')
noise_model = traits.Enum(
'Gaussian',
'Rician',
argstr='-n %s',
usedefault=True,
desc=('Employ a Rician or Gaussian noise model.'))
shrink_factor = traits.Int(
default_value=1,
usedefault=True,
argstr='-s %s',
desc=('Running noise correction on large images can'
' be time consuming. To lessen computation time,'
' the input image can be resampled. The shrink'
' factor, specified as a single integer, describes'
' this resampling. Shrink factor = 1 is the default.'))
output_image = File(
argstr="-o %s",
name_source=['input_image'],
hash_files=False,
keep_extension=True,
name_template='%s_noise_corrected',
desc='The output consists of the noise corrected'
' version of the input image.')
save_noise = traits.Bool(
False,
mandatory=True,
usedefault=True,
desc=('True if the estimated noise should be saved to file.'),
xor=['noise_image'])
noise_image = File(
name_source=['input_image'],
hash_files=False,
keep_extension=True,
name_template='%s_noise',
desc='Filename for the estimated noise.')
verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.'))
class DenoiseImageOutputSpec(TraitedSpec):
output_image = File(exists=True)
noise_image = File()
class DenoiseImage(ANTSCommand):
"""
Examples
--------
>>> import copy
>>> from nipype.interfaces.ants import DenoiseImage
>>> denoise = DenoiseImage()
>>> denoise.inputs.dimension = 3
>>> denoise.inputs.input_image = 'im1.nii'
>>> denoise.cmdline
'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1'
>>> denoise_2 = copy.deepcopy(denoise)
>>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz'
>>> denoise_2.inputs.noise_model = 'Rician'
>>> denoise_2.inputs.shrink_factor = 2
>>> denoise_2.cmdline
'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2'
>>> denoise_3 = DenoiseImage()
>>> denoise_3.inputs.input_image = 'im1.nii'
>>> denoise_3.inputs.save_noise = True
>>> denoise_3.cmdline
'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1'
"""
input_spec = DenoiseImageInputSpec
output_spec = DenoiseImageOutputSpec
_cmd = 'DenoiseImage'
def _format_arg(self, name, trait_spec, value):
if ((name == 'output_image') and
(self.inputs.save_noise or isdefined(self.inputs.noise_image))):
newval = '[ %s, %s ]' % (
self._filename_from_source('output_image'),
self._filename_from_source('noise_image'))
return trait_spec.argstr % newval
return super(DenoiseImage, self)._format_arg(name, trait_spec, value)
class AntsJointFusionInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3,
2,
4,
argstr='-d %d',
desc='This option forces the image to be treated '
'as a specified-dimensional image. If not '
'specified, the program tries to infer the '
'dimensionality from the input image.')
target_image = traits.List(
InputMultiPath(File(exists=True)),
argstr='-t %s',
mandatory=True,
desc='The target image (or '
'multimodal target images) assumed to be '
'aligned to a common image domain.')
atlas_image = traits.List(
InputMultiPath(File(exists=True)),
argstr="-g %s...",
mandatory=True,
desc='The atlas image (or '
'multimodal atlas images) assumed to be '
'aligned to a common image domain.')
atlas_segmentation_image = InputMultiPath(
File(exists=True),
argstr="-l %s...",
mandatory=True,
desc='The atlas segmentation '
'images. For performing label fusion the number '
'of specified segmentations should be identical '
'to the number of atlas image sets.')
alpha = traits.Float(
default_value=0.1,
usedefault=True,
argstr='-a %s',
desc=(
'Regularization '
'term added to matrix Mx for calculating the inverse. Default = 0.1'
))
beta = traits.Float(
default_value=2.0,
usedefault=True,
argstr='-b %s',
desc=('Exponent for mapping '
'intensity difference to the joint error. Default = 2.0'))
retain_label_posterior_images = traits.Bool(
False,
argstr='-r',
usedefault=True,
requires=['atlas_segmentation_image'],
desc=('Retain label posterior probability images. Requires '
'atlas segmentations to be specified. Default = false'))
retain_atlas_voting_images = traits.Bool(
False,
argstr='-f',
usedefault=True,
desc=('Retain atlas voting images. Default = false'))
constrain_nonnegative = traits.Bool(
False,
argstr='-c',
usedefault=True,
desc=('Constrain solution to non-negative weights.'))
patch_radius = traits.ListInt(
minlen=3,
maxlen=3,
argstr='-p %s',
desc=('Patch radius for similarity measures.'
'Default: 2x2x2'))
patch_metric = traits.Enum(
'PC',
'MSQ',
argstr='-m %s',
desc=('Metric to be used in determining the most similar '
'neighborhood patch. Options include Pearson\'s '
'correlation (PC) and mean squares (MSQ). Default = '
'PC (Pearson correlation).'))
search_radius = traits.List(
[3, 3, 3],
minlen=1,
maxlen=3,
argstr='-s %s',
usedefault=True,
desc=('Search radius for similarity measures. Default = 3x3x3. '
'One can also specify an image where the value at the '
'voxel specifies the isotropic search radius at that voxel.'))
exclusion_image_label = traits.List(
traits.Str(),
argstr='-e %s',
requires=['exclusion_image'],
desc=('Specify a label for the exclusion region.'))
exclusion_image = traits.List(
File(exists=True),
desc=('Specify an exclusion region for the given label.'))
mask_image = File(
argstr='-x %s',
exists=True,
desc='If a mask image '
'is specified, fusion is only performed in the mask region.')
out_label_fusion = File(
argstr="%s", hash_files=False, desc='The output label fusion image.')
out_intensity_fusion_name_format = traits.Str(
argstr="",
desc='Optional intensity fusion '
'image file name format. '
'(e.g. "antsJointFusionIntensity_%d.nii.gz")')
out_label_post_prob_name_format = traits.Str(
'antsJointFusionPosterior_%d.nii.gz',
requires=['out_label_fusion', 'out_intensity_fusion_name_format'],
desc='Optional label posterior probability '
'image file name format.')
out_atlas_voting_weight_name_format = traits.Str(
'antsJointFusionVotingWeight_%d.nii.gz',
requires=[
'out_label_fusion', 'out_intensity_fusion_name_format',
'out_label_post_prob_name_format'
],
desc='Optional atlas voting weight image '
'file name format.')
verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.'))
class AntsJointFusionOutputSpec(TraitedSpec):
out_label_fusion = File(exists=True)
out_intensity_fusion_name_format = traits.Str()
out_label_post_prob_name_format = traits.Str()
out_atlas_voting_weight_name_format = traits.Str()
class AntsJointFusion(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import AntsJointFusion
>>> antsjointfusion = AntsJointFusion()
>>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
>>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ]
>>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz']
>>> antsjointfusion.inputs.target_image = ['im1.nii']
>>> antsjointfusion.cmdline
"antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \
-b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']"
>>> antsjointfusion.inputs.target_image = [ ['im1.nii', 'im2.nii'] ]
>>> antsjointfusion.cmdline
"antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \
-b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']"
>>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'],
... ['rc2s1.nii','rc2s2.nii'] ]
>>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz',
... 'segmentation1.nii.gz']
>>> antsjointfusion.cmdline
"antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii \
-s 3x3x3 -t ['im1.nii', 'im2.nii']"
>>> antsjointfusion.inputs.dimension = 3
>>> antsjointfusion.inputs.alpha = 0.5
>>> antsjointfusion.inputs.beta = 1.0
>>> antsjointfusion.inputs.patch_radius = [3,2,1]
>>> antsjointfusion.inputs.search_radius = [3]
>>> antsjointfusion.cmdline
"antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii \
-p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']"
>>> antsjointfusion.inputs.search_radius = ['mask.nii']
>>> antsjointfusion.inputs.verbose = True
>>> antsjointfusion.inputs.exclusion_image = ['roi01.nii', 'roi02.nii']
>>> antsjointfusion.inputs.exclusion_image_label = ['1','2']
>>> antsjointfusion.cmdline
"antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \
-o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"
>>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
>>> antsjointfusion.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz'
>>> antsjointfusion.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz'
>>> antsjointfusion.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz'
>>> antsjointfusion.cmdline
"antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \
-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \
-o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, \
ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] \
-p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"
"""
input_spec = AntsJointFusionInputSpec
output_spec = AntsJointFusionOutputSpec
_cmd = 'antsJointFusion'
def _format_arg(self, opt, spec, val):
if opt == 'exclusion_image_label':
retval = []
for ii in range(len(self.inputs.exclusion_image_label)):
retval.append(
'-e {0}[{1}]'.format(self.inputs.exclusion_image_label[ii],
self.inputs.exclusion_image[ii]))
retval = ' '.join(retval)
elif opt == 'patch_radius':
retval = '-p {0}'.format(self._format_xarray(val))
elif opt == 'search_radius':
retval = '-s {0}'.format(self._format_xarray(val))
elif opt == 'out_label_fusion':
if isdefined(self.inputs.out_intensity_fusion_name_format):
if isdefined(self.inputs.out_label_post_prob_name_format):
if isdefined(
self.inputs.out_atlas_voting_weight_name_format):
retval = '-o [{0}, {1}, {2}, {3}]'.format(
self.inputs.out_label_fusion,
self.inputs.out_intensity_fusion_name_format,
self.inputs.out_label_post_prob_name_format,
self.inputs.out_atlas_voting_weight_name_format)
else:
retval = '-o [{0}, {1}, {2}]'.format(
self.inputs.out_label_fusion,
self.inputs.out_intensity_fusion_name_format,
self.inputs.out_label_post_prob_name_format)
else:
retval = '-o [{0}, {1}]'.format(
self.inputs.out_label_fusion,
self.inputs.out_intensity_fusion_name_format)
else:
retval = '-o {0}'.format(self.inputs.out_label_fusion)
elif opt == 'out_intensity_fusion_name_format':
retval = ''
if not isdefined(self.inputs.out_label_fusion):
retval = '-o {0}'.format(
self.inputs.out_intensity_fusion_name_format)
elif opt == 'atlas_image':
atlas_image_cmd = " ".join([
'-g [{0}]'.format(", ".join("'%s'" % fn for fn in ai))
for ai in self.inputs.atlas_image
])
retval = atlas_image_cmd
elif opt == 'target_image':
target_image_cmd = " ".join([
'-t [{0}]'.format(", ".join("'%s'" % fn for fn in ai))
for ai in self.inputs.target_image
])
retval = target_image_cmd
elif opt == 'atlas_segmentation_image':
assert len(val) == len(self.inputs.atlas_image), "Number of specified " \
"segmentations should be identical to the number of atlas image " \
"sets {0}!={1}".format(len(val), len(self.inputs.atlas_image))
atlas_segmentation_image_cmd = " ".join([
'-l {0}'.format(fn)
for fn in self.inputs.atlas_segmentation_image
])
retval = atlas_segmentation_image_cmd
else:
return super(AntsJointFusion, self)._format_arg(opt, spec, val)
return retval
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.out_label_fusion):
outputs['out_label_fusion'] = os.path.abspath(
self.inputs.out_label_fusion)
if isdefined(self.inputs.out_intensity_fusion_name_format):
outputs['out_intensity_fusion_name_format'] = os.path.abspath(
self.inputs.out_intensity_fusion_name_format)
if isdefined(self.inputs.out_label_post_prob_name_format):
outputs['out_label_post_prob_name_format'] = os.path.abspath(
self.inputs.out_label_post_prob_name_format)
if isdefined(self.inputs.out_atlas_voting_weight_name_format):
outputs['out_atlas_voting_weight_name_format'] = os.path.abspath(
self.inputs.out_atlas_voting_weight_name_format)
return outputs
class KellyKapowskiInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(
3,
2,
argstr='--image-dimensionality %d',
usedefault=True,
desc='image dimension (2 or 3)')
segmentation_image = File(
exists=True,
argstr='--segmentation-image "%s"',
mandatory=True,
desc=
"A segmentation image must be supplied labeling the gray and white matters."
" Default values = 2 and 3, respectively.",
)
gray_matter_label = traits.Int(
2,
usedefault=True,
desc=
"The label value for the gray matter label in the segmentation_image.")
white_matter_label = traits.Int(
3,
usedefault=True,
desc=
"The label value for the white matter label in the segmentation_image."
)
gray_matter_prob_image = File(
exists=True,
argstr='--gray-matter-probability-image "%s"',
desc=
"In addition to the segmentation image, a gray matter probability image can be"
" used. If no such image is supplied, one is created using the segmentation image"
" and a variance of 1.0 mm.")
white_matter_prob_image = File(
exists=True,
argstr='--white-matter-probability-image "%s"',
desc=
"In addition to the segmentation image, a white matter probability image can be"
" used. If no such image is supplied, one is created using the segmentation image"
" and a variance of 1.0 mm.")
convergence = traits.Str(
default="[50,0.001,10]",
argstr='--convergence "%s"',
usedefault=True,
desc=
"Convergence is determined by fitting a line to the normalized energy profile of"
" the last N iterations (where N is specified by the window size) and determining"
" the slope which is then compared with the convergence threshold.",
)
thickness_prior_estimate = traits.Float(
10,
usedefault=True,
argstr="--thickness-prior-estimate %f",
desc=
"Provides a prior constraint on the final thickness measurement in mm."
)
thickness_prior_image = File(
exists=True,
argstr='--thickness-prior-image "%s"',
desc="An image containing spatially varying prior thickness values.")
gradient_step = traits.Float(
0.025,
usedefault=True,
argstr="--gradient-step %f",
desc="Gradient step size for the optimization.")
smoothing_variance = traits.Float(
1.0, usedefault=True,
argstr="--smoothing-variance %f",
desc="Defines the Gaussian smoothing of the hit and total images.")
smoothing_velocity_field = traits.Float(
1.5, usedefault=True,
argstr="--smoothing-velocity-field-parameter %f",
desc=
"Defines the Gaussian smoothing of the velocity field (default = 1.5)."
" If the b-spline smoothing option is chosen, then this defines the"
" isotropic mesh spacing for the smoothing spline (default = 15).")
use_bspline_smoothing = traits.Bool(
argstr="--use-bspline-smoothing 1",
desc="Sets the option for B-spline smoothing of the velocity field.")
number_integration_points = traits.Int(
10, usedefault=True,
argstr="--number-of-integration-points %d",
desc="Number of compositions of the diffeomorphism per iteration.")
max_invert_displacement_field_iters = traits.Int(
20, usedefault=True,
argstr="--maximum-number-of-invert-displacement-field-iterations %d",
desc="Maximum number of iterations for estimating the invert"
"displacement field.")
cortical_thickness = File(
argstr='--output "%s"',
keep_extension=True,
name_source=["segmentation_image"],
name_template='%s_cortical_thickness',
desc='Filename for the cortical thickness.',
hash_files=False)
warped_white_matter = File(
name_source=["segmentation_image"],
keep_extension=True,
name_template='%s_warped_white_matter',
desc='Filename for the warped white matter file.',
hash_files=False)
class KellyKapowskiOutputSpec(TraitedSpec):
cortical_thickness = File(
desc="A thickness map defined in the segmented gray matter.")
warped_white_matter = File(desc="A warped white matter image.")
class KellyKapowski(ANTSCommand):
""" Nipype Interface to ANTs' KellyKapowski, also known as DiReCT.
DiReCT is a registration based estimate of cortical thickness. It was published
in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based
cortical thickness measurement, Neuroimage 2009, 45:867--879.
Examples
--------
>>> from nipype.interfaces.ants.segmentation import KellyKapowski
>>> kk = KellyKapowski()
>>> kk.inputs.dimension = 3
>>> kk.inputs.segmentation_image = "segmentation0.nii.gz"
>>> kk.inputs.convergence = "[45,0.0,10]"
>>> kk.inputs.thickness_prior_estimate = 10
>>> kk.cmdline
'KellyKapowski --convergence "[45,0.0,10]" \
--output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" \
--image-dimensionality 3 --gradient-step 0.025000 \
--maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 \
--segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 \
--smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000'
"""
_cmd = "KellyKapowski"
input_spec = KellyKapowskiInputSpec
output_spec = KellyKapowskiOutputSpec
references_ = [{
'entry':
BibTeX(
"@book{Das2009867,"
"author={Sandhitsu R. Das and Brian B. Avants and Murray Grossman and James C. Gee},"
"title={Registration based cortical thickness measurement.},"
"journal={NeuroImage},"
"volume={45},"
"number={37},"
"pages={867--879},"
"year={2009},"
"issn={1053-8119},"
"url={http://www.sciencedirect.com/science/article/pii/S1053811908012780},"
"doi={http://dx.doi.org/10.1016/j.neuroimage.2008.12.016}"
"}"),
'description':
'The details on the implementation of DiReCT.',
'tags': ['implementation'],
}]
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
skip += [
'warped_white_matter', 'gray_matter_label', 'white_matter_label'
]
return super(KellyKapowski, self)._parse_inputs(skip=skip)
def _gen_filename(self, name):
if name == 'cortical_thickness':
output = self.inputs.cortical_thickness
if not isdefined(output):
_, name, ext = split_filename(self.inputs.segmentation_image)
output = name + '_cortical_thickness' + ext
return output
if name == 'warped_white_matter':
output = self.inputs.warped_white_matter
if not isdefined(output):
_, name, ext = split_filename(self.inputs.segmentation_image)
output = name + '_warped_white_matter' + ext
return output
return None
def _format_arg(self, opt, spec, val):
if opt == "segmentation_image":
newval = '[{0},{1},{2}]'.format(self.inputs.segmentation_image,
self.inputs.gray_matter_label,
self.inputs.white_matter_label)
return spec.argstr % newval
if opt == "cortical_thickness":
ct = self._gen_filename("cortical_thickness")
wm = self._gen_filename("warped_white_matter")
newval = '[{},{}]'.format(ct, wm)
return spec.argstr % newval
return super(KellyKapowski, self)._format_arg(opt, spec, val)
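# A minimal sketch of wiring the interface into a nipype workflow; the upstream
# node and its output field name are illustrative, not taken from this file.
#
#     from nipype import Node, Workflow
#     direct = Node(KellyKapowski(dimension=3, thickness_prior_estimate=10), name="direct")
#     wf = Workflow(name="cortical_thickness")
#     wf.connect(segment_node, "segmentation_file", direct, "segmentation_image")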
|
the-stack_106_21490
|
#!/usr/bin/python
"""
Train LogisticRegression on mnist (sample) as baseline
"""
import sys,os
sys.path.append(os.getcwd())
import argparse
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
#from sklearn import datasets, metrics
from sklearn.datasets import fetch_openml
def main():
# Argument parser
ap = argparse.ArgumentParser()
# CLI parameters
ap.add_argument("-s", "--split", required=True, help="Test split percentage")
# Parse input arguments
args = vars(ap.parse_args())
# Parse train-test split value
split = float(args["split"])
# load data and apply min/max scaling
    # each image is 28x28 pixels grayscale (784 features)
    print("[INFO] loading MNIST dataset")
#digits = datasets.load_digits()
data, labels = fetch_openml('mnist_784', version=1, return_X_y=True)
# to data
#data = digits.data.astype("float")
data = data.astype("float")
data = (data - data.min())/(data.max() - data.min())
print("[INFO] samples: {}, dim: {}".format(data.shape[0], data.shape[1]))
# split data
(trainX, testX, trainY, testY) = train_test_split(data,
labels,
test_size=split)
# train network
print("[INFO] training classifier...")
clf = LogisticRegression(penalty='none',
tol=0.1,
solver='saga',
verbose=True,
multi_class='multinomial').fit(trainX, trainY)
# evaluate network
print(["[INFO] evaluating network..."])
predictions = clf.predict(testX)
#predictions = predictions.argmax(axis=1)
cm = classification_report(testY, predictions)
print(cm)
if __name__=="__main__":
main()
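# Example invocation (the script filename is a placeholder; --split is the
# fraction of the data held out for testing):
#
#     python lr_mnist.py --split 0.2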
|
the-stack_106_21491
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkcenterlinesmoothing.py,v $
## Language: Python
## Date: $Date: 2006/07/17 09:52:56 $
## Version: $Revision: 1.1 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import vtkvmtk
import sys
import pypes
vmtkcenterlinesmoothing = 'vmtkCenterlineSmoothing'
class vmtkCenterlineSmoothing(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Centerlines = None
self.NumberOfSmoothingIterations = 100
self.SmoothingFactor = 0.1
self.SetScriptName('vmtkcenterlinesmoothing')
self.SetScriptDoc('smooth centerlines with a moving average filter')
self.SetInputMembers([
['Centerlines','i','vtkPolyData',1,'','the input centerlines','vmtksurfacereader'],
['NumberOfSmoothingIterations','iterations','int',1,'(0,)'],
['SmoothingFactor','factor','float',1,'(0.0,)']
])
self.SetOutputMembers([
['Centerlines','o','vtkPolyData',1,'','the output centerlines','vmtksurfacewriter']
])
def Execute(self):
        if self.Centerlines is None:
self.PrintError('Error: No input centerlines.')
centerlineSmoothing = vtkvmtk.vtkvmtkCenterlineSmoothing()
centerlineSmoothing.SetInputData(self.Centerlines)
centerlineSmoothing.SetNumberOfSmoothingIterations(self.NumberOfSmoothingIterations)
centerlineSmoothing.SetSmoothingFactor(self.SmoothingFactor)
centerlineSmoothing.Update()
self.Centerlines = centerlineSmoothing.GetOutput()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
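# Example pype invocation (flags follow the input/output members declared in
# __init__; file names are placeholders):
#
#     vmtkcenterlinesmoothing -ifile centerlines.vtp -iterations 200 -factor 0.2 -ofile smoothed.vtp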
|
the-stack_106_21492
|
# -*- coding: utf-8 -*-
"""Cross-platform tools for GitHub workflows.
To extract package version and run pip installations.
"""
import glob
import inspect
import os
import re
import sys
from typing import Optional
from typing import Pattern
from typing import Sequence
if re != sys: # need to protect the #nosec comment from being deleted by zimports
import subprocess # nosec B404 # security implications are considered
PATH_OF_CURRENT_FILE = os.path.dirname((inspect.stack()[0][1]))
# python3 .github/workflows/extract_package_info.py package_name
# python3 .github/workflows/extract_package_info.py package_version
# python3 .github/workflows/extract_package_info.py install_from_dist
def _extract_info(regex: Pattern[str]) -> str:
with open(os.path.join(PATH_OF_CURRENT_FILE, os.pardir, os.pardir, "setup.py"), "r") as in_file:
content = in_file.read()
match = re.search(regex, content)
if match is None:
raise NotImplementedError("A match in setup.py should always be found.")
output = match.group(1)
print(output) # allow-print
return output
def _run_pip(args: Sequence[str]) -> None:
print(f"About to run pip with args: {args}") # allow-print
results = subprocess.run(args) # nosec B603 # shell is false, but input is secure
if results.returncode != 0:
sys.exit(results.returncode)
def package_name() -> str:
regex = re.compile(r" name=\"(.+)\"")
return _extract_info(regex)
def package_version() -> str:
regex = re.compile(r" version=\"(.+?)\"")
return _extract_info(regex)
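# For reference, the two regexes above expect setup.py to contain lines shaped
# like the following (names and numbers are placeholders):
#
#     name="my_package",
#     version="1.2.3",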
def pip_install_dist_wheel() -> None:
path_to_dist = os.path.join(PATH_OF_CURRENT_FILE, os.pardir, os.pardir, "dist")
all_files = glob.glob(f"{path_to_dist}{os.sep}*{package_version()}*.whl")
if len(all_files) != 1:
raise NotImplementedError(
f"There should only be a single whl file in the dist folder matching this package version. Files found: {all_files}"
)
wheel_file = os.path.abspath(all_files[0])
args = ["pip", "install", wheel_file]
_run_pip(args)
def pip_install(test_pypi: Optional[bool] = False) -> None:
args = ["pip", "install", f"{package_name()}=={package_version()}"]
if test_pypi:
args.extend(
[
"--index-url",
"https://test.pypi.org/simple/",
"--extra-index-url",
"https://pypi.org/simple",
]
)
_run_pip(args)
if __name__ == "__main__":
first_arg = sys.argv[1]
if first_arg == "package_name":
package_name()
elif first_arg == "package_version":
package_version()
elif first_arg == "install_from_test_pypi":
pip_install(test_pypi=True)
elif first_arg == "install_from_pypi":
pip_install()
elif first_arg == "install_from_dist":
pip_install_dist_wheel()
|
the-stack_106_21493
|
from enum import Enum
from typing import Union
class LanguageCode(Enum):
ENGLISH = 'en'
ENGLISH_US = 'en_US'
ENGLISH_UK = 'en_UK'
ITALIAN = 'it'
SPANISH = 'es'
SPANISH_SPAIN = 'es_ES'
SPANISH_LATIN_AMERICA = 'es_LA'
GERMAN = 'de'
FRENCH = 'fr'
DUTCH = 'nl'
CHINESE = 'zh'
CHINESE_PRC = 'zh_CN'
CHINESE_HONG_KONG = 'zh_HK'
LANGUAGE_CODES = [x.value for x in LanguageCode]
FALLBACK_LANGUAGE = {
LanguageCode.ENGLISH: [
LanguageCode.ENGLISH_UK,
LanguageCode.ENGLISH_US
],
LanguageCode.ENGLISH_US: [
LanguageCode.ENGLISH,
LanguageCode.ENGLISH_UK
],
LanguageCode.ENGLISH_UK: [
LanguageCode.ENGLISH,
LanguageCode.ENGLISH_US
],
LanguageCode.ITALIAN: [],
LanguageCode.SPANISH: [
LanguageCode.SPANISH_LATIN_AMERICA
],
LanguageCode.SPANISH_SPAIN: [
LanguageCode.SPANISH,
LanguageCode.SPANISH_LATIN_AMERICA
],
LanguageCode.SPANISH_LATIN_AMERICA: [
LanguageCode.SPANISH,
LanguageCode.SPANISH_SPAIN
],
LanguageCode.GERMAN: [],
LanguageCode.FRENCH: [],
LanguageCode.DUTCH: [],
LanguageCode.CHINESE: [
LanguageCode.CHINESE_PRC
],
LanguageCode.CHINESE_PRC: [
LanguageCode.CHINESE
],
LanguageCode.CHINESE_HONG_KONG: []
}
def ensure_language_code(lang: Union[LanguageCode, str]) -> LanguageCode:
"""
Make sure `lang` is a :class:`LanguageCode` value. If not, return
`LanguageCode(lang)` (assuming it's a string).
This is useful for user-facing methods, where input may be a language code
or a string.
Args:
lang: An input language code
Returns:
A LanguageCode object representing `lang`
"""
if isinstance(lang, LanguageCode):
return lang
return LanguageCode(lang)
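# A minimal usage sketch of the helpers above; the language code value is
# illustrative and this block is not part of the library API.
if __name__ == "__main__":
    lang = ensure_language_code("en_US")
    print(lang)                                          # LanguageCode.ENGLISH_US
    print([fb.value for fb in FALLBACK_LANGUAGE[lang]])  # ['en', 'en_UK']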
|
the-stack_106_21496
|
from __future__ import annotations
__all__ = ["Board"]
from endplay.types.deal import Deal
from endplay.types.contract import Contract
from endplay.types.vul import Vul
from endplay.types.player import Player
from endplay.types.bid import Bid
from endplay.types.card import Card
from typing import Optional, Any
from collections.abc import Iterable
class Board:
"""
Class representing a deal along with the play, auction and other table
information
:ivar deal: The deal at the table
:vartype deal: Deal
:ivar auction: The auction at the table
:vartype auction: list[Bid]
:ivar contract: The contract at the table
:vartype contract: Contract
:ivar play: The play history at the table
:vartype play: list[Card]
:ivar board_num: The board number of this deal
:vartype board_num: int
:ivar vul: The board vulnerability. If this isn't defined
(i.e. set to `None`) then it is deduced from `board_num`
:vartype vul: Vul
:ivar dealer: The board dealer. Similarly to `vul` this can
be deduced from `board_num`
:vartype dealer: Player
:ivar claimed: Flag indicating whether the play ended as a result
of a claim
:vartype claimed: bool
:ivar info: A dictionary which contains arbitrary extra information
about the board. The dictionary type used provided case-insensitive
dot-access as a convenience (i.e. `board.info.event` and `board.info.Event`
refer to the same object, but `board.info["event"]` and `board.info["Event"]`
would be considered different). Tabular data can be stored here, any
key ending with (but not equal to) `table` is treated as a table and its value
should be a dictionary containing two keys: `headers` with a list of column names,
and `rows` with a list of the rows. The column names can either be plain strings,
or dictionaries with the keys
* `ordering`: Either `"+"`, `"-"` or `None` depending of if the table is sorted
ascending, descending or unsorted with respect to this column
* `name`: A string value with the name of the column
* `minwidth`: The minimum width that values in this column should be
* `alignment`: `"L"` or `"R"` depending on if this column should be left or right
aligned. Ignored unless `minwidth` is defined
"""
class Info(dict):
"""
        Dictionary-like class which allows for case-insensitive dot-access,
for example::
info["Event"] = "WBF 2017"
print(info.event) # WBF 2017
"""
def _find_key(self, key: str):
key = key.casefold()
for k in self:
if k.casefold() == key:
return k
return None
def __getattr__(self, attr: str) -> Any:
key = self._find_key(attr)
if key is not None:
return self[key]
return None
def __setattr__(self, attr: str, value: Any) -> None:
key = self._find_key(attr)
if key is not None:
self[key] = value
else:
self[attr] = value
def __delattr__(self, attr: str) -> None:
key = self._find_key(attr)
if key is not None:
del self[key]
else:
raise KeyError(attr)
def __init__(self,
deal: Optional[Deal] = None,
auction: Optional[Iterable[Bid]] = None,
play: Optional[Iterable[Card]] = None,
board_num: Optional[int] = None,
*,
vul: Optional[Vul] = None,
dealer: Optional[Player] = None,
contract: Optional[Contract] = None,
claimed: bool = False,
**kwargs):
self.deal = deal.copy() if deal is not None else Deal()
self.auction = list(auction) if auction is not None else []
self.play = list(play) if play is not None else []
self.board_num = board_num
self._dealer = dealer
self._vul = vul
self._contract = contract
self.claimed = claimed
self.info = Board.Info(**kwargs)
@property
    def dealer(self) -> Optional[Player]:
"""
Dealer of the board. If not defined, then attempts to
calculate based on the value of `board_num`
"""
if self._dealer is not None:
return self._dealer
elif self.board_num is not None:
return Player.from_board(self.board_num)
else:
return None
@dealer.setter
def dealer(self, value: Player) -> None:
self._dealer = value
@property
    def vul(self) -> Optional[Vul]:
"""
Vulnerability of the board. If not defined, then attempts
to calculate based on the value of `board_num`
"""
if self._vul is not None:
return self._vul
elif self.board_num is not None:
return Vul.from_board(self.board_num)
else:
return None
@vul.setter
def vul(self, value: Vul) -> None:
self._vul = value
@property
    def contract(self) -> Optional[Contract]:
"""
The contract the board was played in. If not provided, then
attempts to calculate based on the auction and play history.
"""
if self._contract is not None:
return self._contract
elif self.auction:
c = Contract.from_auction(self.auction)
if self.play:
from endplay.utils.play import total_tricks, tricks_to_result
c.result = tricks_to_result(
total_tricks(self.play, self.deal.trump),
                    c.level)
            return c
else:
return None
@contract.setter
def contract(self, value: Contract) -> None:
self._contract = value
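# A minimal usage sketch of the class above; the board number and the extra
# "event" info field are illustrative, not defaults from this module.
if __name__ == "__main__":
    board = Board(board_num=7, event="Club pairs")
    print(board.dealer, board.vul)  # both deduced from board_num
    print(board.info.Event)         # "Club pairs", via case-insensitive dot-access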
|
the-stack_106_21498
|
import sys
import tweepy
import numpy as np
from textblob import TextBlob
def tweet_analysis(query):
# get tweets
tweets = tweepy.Cursor(api.search, q = query + " -filter:retweets").items(20)
    polarities = []
    subjectivities = []
    for tweet in tweets:
        phrase = TextBlob(tweet.text)
if not is_english(phrase):
try:
phrase = TextBlob(str(phrase.translate(to = "en")))
except:
phrase = phrase
if phrase.sentiment.polarity != 0.0 and phrase.sentiment.subjectivity != 0.0:
polarities.append(phrase.sentiment.polarity)
subjectivities.append(phrase.sentiment.subjectivity)
print("Tweet: " + tweet.text)
print("Polarity: " + str(phrase.sentiment.polarity))
print("Subjectivity: " + str(phrase.sentiment.subjectivity))
print("***************************************")
return {"polarity": polarities, "subjectivity": subjectivities}
def is_english(text):
if text.detect_language() == "en":
return True
return False
def get_polarity_mean(valid_tweets):
try:
if len(valid_tweets["polarity"]) == 0:
return 0
return np.mean(valid_tweets["polarity"])
except:
return 0
def get_weighted_polarity_mean(valid_tweets):
try:
return np.average(valid_tweets["polarity"], weights=valid_tweets["subjectivity"])
except:
return 0
def print_result(mean):
if mean > 0.0:
print("POSITIVE")
elif mean == 0.0:
print("NEUTRAL")
else:
print("NEGATIVE")
# make sure consumer keys and access tokens are included in command line call
if len(sys.argv) != 5:
print("need to add consumer keys and access tokens as command line args")
exit()
# get consumer keys from command line
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
# get access tokens from command line
access_token = sys.argv[3]
access_token_secret = sys.argv[4]
# create auth object for api
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# create api object
api = tweepy.API(auth)
if __name__ == "__main__":
query = input("Enter a query to find: ")
analysis = tweet_analysis(query)
print("WEIGHTED MEAN: " + str(get_weighted_polarity_mean(analysis)))
print_result(get_weighted_polarity_mean(analysis))
print("MEAN: " + str(get_polarity_mean(analysis)))
print_result(get_polarity_mean(analysis))
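# Example invocation (the script filename and credential values are placeholders):
#
#     python tweet_sentiment.py CONSUMER_KEY CONSUMER_SECRET ACCESS_TOKEN ACCESS_TOKEN_SECRET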
|