the-stack_106_25663
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for Stelzer et al. cluster thresholding algorithm"""
from mvpa2.base import externals
from mvpa2.testing.tools import skip_if_no_external
# TODO a tiny bit also needs statsmodels
skip_if_no_external("statsmodels")
skip_if_no_external("scipy")
from collections import Counter
import numpy as np
import random
from mvpa2.testing import (
assert_array_equal,
assert_raises,
assert_equal,
assert_array_almost_equal,
assert_almost_equal,
assert_true,
assert_false,
)
import mvpa2.algorithms.group_clusterthr as gct
from mvpa2.datasets import Dataset, dataset_wizard
from nose.tools import assert_greater_equal, assert_greater
from mvpa2.testing.sweep import sweepargs
from scipy.ndimage import measurements
from scipy.stats import norm
def test_pval():
def not_inplace_shuffle(x):
x = list(x)
random.shuffle(x)
return x
x = list(range(100000)) * 20
x = np.array(x)
x = x.reshape(20, 100000)
x = x.T
x = np.apply_along_axis(not_inplace_shuffle, axis=0, arr=x)
expected_result = [100000 - 100000 * 0.001] * 20
thresholds = gct.get_thresholding_map(x, p=0.001)
assert_array_equal(thresholds, expected_result)
# works with datasets too
dsthresholds = gct.get_thresholding_map(Dataset(x), p=0.001)
assert_almost_equal(thresholds, dsthresholds)
assert_raises(ValueError, gct.get_thresholding_map, x, p=0.00000001)
x = list(range(0, 100, 5))
null_dist = np.repeat(1, 100).astype(float)[None]
pvals = gct._transform_to_pvals(x, null_dist)
desired_output = np.array(
[
1,
0.95,
0.9,
0.85,
0.8,
0.75,
0.7,
0.65,
0.6,
0.55,
0.5,
0.45,
0.4,
0.35,
0.3,
0.25,
0.2,
0.15,
0.1,
0.05,
]
)
assert_array_almost_equal(desired_output, pvals)
def test_cluster_count():
skip_if_no_external("scipy", min_version="0.10")
# we get a ZERO cluster count of one if there are no clusters at all
# this is needed to keep track of the number of bootstrap samples that yield
# no cluster at all (high threshold) in order to compute p-values when there is no
# actual cluster size histogram
assert_equal(gct._get_map_cluster_sizes([0, 0, 0, 0]), [0])
# if there is at least one cluster: no ZERO count
assert_equal(gct._get_map_cluster_sizes([0, 0, 1, 0]), [1])
for i in range(2): # rerun tests for bool type of test_M
test_M = np.array(
[
[1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0],
[1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
]
)
expected_result = [5, 4, 3, 3, 2, 0, 2] # 5 clusters of size 1,
# 4 clusters of size 2 ...
test_ds = Dataset([test_M])
if i == 1:
test_M = test_M.astype(bool)
test_M_3d = np.hstack((test_M.flatten(), test_M.flatten())).reshape(2, 9, 16)
test_ds_3d = Dataset([test_M_3d])
# expected_result^2
expected_result_3d = np.array([0, 5, 0, 4, 0, 3, 0, 3, 0, 2, 0, 0, 0, 2])
size = 10000 # how many times bigger than test_M_3d
test_M_3d_big = np.hstack((test_M_3d.flatten(), np.zeros(144)))
test_M_3d_big = np.hstack((test_M_3d_big for i in range(size))).reshape(
3 * size, 9, 16
)
test_ds_3d_big = Dataset([test_M_3d_big])
expected_result_3d_big = expected_result_3d * size
# check basic cluster size determination for plain arrays and datasets
# with a single sample
for t, e in (
(test_M, expected_result),
(test_ds, expected_result),
(test_M_3d, expected_result_3d),
(test_ds_3d, expected_result_3d),
(test_M_3d_big, expected_result_3d_big),
(test_ds_3d_big, expected_result_3d_big),
):
assert_array_equal(np.bincount(gct._get_map_cluster_sizes(t))[1:], e)
# old
M = np.vstack([test_M_3d.flatten()] * 10)
# new
ds = dataset_wizard([test_M_3d] * 10)
assert_array_equal(M, ds)
expected_result = Counter(
np.hstack([gct._get_map_cluster_sizes(test_M_3d)] * 10)
)
assert_array_equal(expected_result, gct.get_cluster_sizes(ds))
# test the same with some arbitrary per-feature threshold
thr = 4
labels, num = measurements.label(test_M_3d)
area = measurements.sum(test_M_3d, labels, index=np.arange(labels.max() + 1))
cluster_sizes_map = area[labels] # .astype(int)
thresholded_cluster_sizes_map = cluster_sizes_map > thr
# old
M = np.vstack([cluster_sizes_map.flatten()] * 10)
# new
ds = dataset_wizard([cluster_sizes_map] * 10)
assert_array_equal(M, ds)
expected_result = Counter(
np.hstack([gct._get_map_cluster_sizes(thresholded_cluster_sizes_map)] * 10)
)
th_map = np.ones(cluster_sizes_map.flatten().shape) * thr
# threshold dataset by hand
ds.samples = ds.samples > th_map
assert_array_equal(expected_result, gct.get_cluster_sizes(ds))
# run same test with parallel and serial execution
@sweepargs(n_proc=[1, 2])
def test_group_clusterthreshold_simple(n_proc):
if n_proc > 1:
skip_if_no_external("joblib")
feature_thresh_prob = 0.005
nsubj = 10
# make a nice 1D blob and a speck
blob = np.array([0, 0, 0.5, 3, 5, 3, 3, 0, 2, 0])
blob = Dataset([blob])
# and some nice random permutations
nperms = 100 * nsubj
perm_samples = np.random.randn(nperms, blob.nfeatures)
perms = Dataset(
perm_samples,
sa=dict(chunks=np.repeat(list(range(nsubj)), len(perm_samples) / nsubj)),
fa=dict(fid=list(range(perm_samples.shape[1]))),
)
# the algorithm instance
# scale number of bootstraps to match desired probability
# plus a safety margin to minimize bad luck in sampling
clthr = gct.GroupClusterThreshold(
n_bootstrap=int(3.0 / feature_thresh_prob),
feature_thresh_prob=feature_thresh_prob,
fwe_rate=0.01,
n_blocks=3,
n_proc=n_proc,
)
clthr.train(perms)
# get the FE thresholds
thr = clthr._thrmap
# perms are normally distributed, hence the CDF at the mean threshold should be close
# to the feature threshold probability; the std of the distribution scales as 1/sqrt(nsubj)
assert_true(
np.abs(
feature_thresh_prob
- (1 - norm.cdf(thr.mean(), loc=0, scale=1.0 / np.sqrt(nsubj)))
)
< 0.01
)
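# For orientation (approximate, illustrative figures not in the original test): with
# nsubj = 10 and feature_thresh_prob = 0.005, the mean threshold should sit near
# norm.ppf(1 - 0.005) / sqrt(10), i.e. roughly 2.58 * 0.316 ~ 0.81.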
clstr_sizes = clthr._null_cluster_sizes
# getting anything but a lonely one feature cluster is very unlikely
assert_true(max([c[0] for c in list(clstr_sizes.keys())]) <= 1)
# threshold orig map
res = clthr(blob)
#
# check output
#
# samples unchanged
assert_array_equal(blob.samples, res.samples)
# need to find the big cluster
assert_true(len(res.a.clusterstats) > 0)
assert_equal(len(res.a.clusterstats), res.fa.clusters_featurewise_thresh.max())
# probs need to decrease with size, clusters are sorted by size (decreasing)
assert_true(res.a.clusterstats["prob_raw"][0] <= res.a.clusterstats["prob_raw"][1])
# corrected probs for every uncorrected cluster
assert_true("prob_corrected" in res.a.clusterstats.dtype.names)
# fwe correction always increases the p-values (if anything)
assert_true(
np.all(res.a.clusterstats["prob_raw"] <= res.a.clusterstats["prob_corrected"])
)
# check expected cluster sizes, ordered large -> small
assert_array_equal(res.a.clusterstats["size"], [4, 1])
# check max position
assert_array_equal(res.a.clusterlocations["max"], [[4], [8]])
# center of mass: eyeballed
assert_array_almost_equal(
res.a.clusterlocations["center_of_mass"], [[4.429], [8]], 3
)
# other simple stats
# [0, 0, .5, 3, 5, 3, 3, 0, 2, 0]
assert_array_equal(res.a.clusterstats["mean"], [3.5, 2])
assert_array_equal(res.a.clusterstats["min"], [3, 2])
assert_array_equal(res.a.clusterstats["max"], [5, 2])
assert_array_equal(res.a.clusterstats["median"], [3, 2])
assert_array_almost_equal(res.a.clusterstats["std"], [0.866, 0], 3)
# fwe thresholding only ever removes clusters
assert_true(
np.all(
np.abs(res.fa.clusters_featurewise_thresh - res.fa.clusters_fwe_thresh) >= 0
)
)
# FWE should kill the small one
assert_greater(
res.fa.clusters_featurewise_thresh.max(), res.fa.clusters_fwe_thresh.max()
)
# check that the cluster results don't depend on the actual location of
# the clusters
shifted_blob = Dataset([[0.5, 3, 5, 3, 3, 0, 0, 0, 2, 0]])
shifted_res = clthr(shifted_blob)
assert_array_equal(res.a.clusterstats, shifted_res.a.clusterstats)
# check that it averages multi-sample datasets
# also checks that scenarios work where all features are part of one big
# cluster
multisamp = Dataset(np.arange(30).reshape(3, 10) + 100)
avgres = clthr(multisamp)
assert_equal(len(avgres), 1)
assert_array_equal(avgres.samples[0], np.mean(multisamp.samples, axis=0))
# retrain, this time with data from only a single subject
perms = Dataset(
perm_samples,
sa=dict(chunks=np.repeat(1, len(perm_samples))),
fa=dict(fid=list(range(perms.shape[1]))),
)
clthr.train(perms)
# same blob -- 1st this should work without issues
sglres = clthr(blob)
# NULL estimation does no averaging
# -> more noise -> fewer clusters -> higher p
assert_greater_equal(len(res.a.clusterstats), len(sglres.a.clusterstats))
assert_greater_equal(
np.round(sglres.a.clusterstats[0]["prob_raw"], 4),
np.round(res.a.clusterstats[0]["prob_raw"], 4),
)
# now again for real scientists: no FWE correction
superclthr = gct.GroupClusterThreshold(
n_bootstrap=int(3.0 / feature_thresh_prob),
feature_thresh_prob=feature_thresh_prob,
multicomp_correction=None,
n_blocks=3,
n_proc=n_proc,
)
superclthr.train(perms)
superres = superclthr(blob)
assert_true("prob_corrected" in res.a.clusterstats.dtype.names)
assert_true("clusters_fwe_thresh" in res.fa)
assert_false("prob_corrected" in superres.a.clusterstats.dtype.names)
assert_false("clusters_fwe_thresh" in superres.fa)
# check validity test
assert_raises(
ValueError,
gct.GroupClusterThreshold,
n_bootstrap=10,
feature_thresh_prob=0.09,
n_proc=n_proc,
)
# check mapped datasets
blob = np.array([[0, 0, 0.5, 3, 5, 3, 3, 0, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
blob = dataset_wizard([blob])
# and some nice random permutations
nperms = 100 * nsubj
perm_samples = np.random.randn(*((nperms,) + blob.shape))
perms = dataset_wizard(
perm_samples, chunks=np.repeat(list(range(nsubj)), len(perm_samples) / nsubj)
)
clthr.train(perms)
twodres = clthr(blob)
# finds two clusters of the same size
assert_array_equal(twodres.a.clusterstats["size"], res.a.clusterstats["size"])
# TODO continue with somewhat more real dataset
def test_repeat_cluster_vals():
assert_array_equal(gct.repeat_cluster_vals({1: 2, 3: 1}), [1, 1, 3])
assert_array_equal(gct.repeat_cluster_vals({1: 2, 3: 2, 2: 1}), [1, 1, 2, 3, 3])
assert_array_equal(
gct.repeat_cluster_vals({1: 2, 3: 1}, {1: 0.2, 3: 0.5}), [0.2, 0.2, 0.5]
)
assert_array_equal(
gct.repeat_cluster_vals({1: 2, 3: 2, 2: 1}, {1: "a", 2: "b", 3: "c"}),
["a", "a", "b", "c", "c"],
)
the-stack_106_25668
from typing import TYPE_CHECKING, Union
from rotkehlchen.crypto import sha3
from rotkehlchen.errors import DBUpgradeError
from rotkehlchen.typing import Location, TradeType
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
def v6_deserialize_location_from_db(symbol: str) -> Location:
"""We copy the deserialize_location_from_db() function at v6
This is done in case the function ever changes in the future. Also another
difference is that instead of DeserializationError this throws a DBUpgradeError
"""
if symbol == 'A':
return Location.EXTERNAL
if symbol == 'B':
return Location.KRAKEN
if symbol == 'C':
return Location.POLONIEX
if symbol == 'D':
return Location.BITTREX
if symbol == 'E':
return Location.BINANCE
if symbol == 'F':
return Location.BITMEX
if symbol == 'G':
return Location.COINBASE
if symbol == 'H':
return Location.TOTAL
if symbol == 'I':
return Location.BANKS
if symbol == 'J':
return Location.BLOCKCHAIN
# else
raise DBUpgradeError(
f'Failed to deserialize location. Unknown symbol {symbol} for location found in DB',
)
def v6_deserialize_trade_type_from_db(symbol: str) -> TradeType:
"""We copy the deserialize_trade_type_from_db() function at v6
This is done in case the function ever changes in the future. Also another
difference is that instead of DeserializationError this throws a DBUpgradeError
"""
if symbol == 'A':
return TradeType.BUY
if symbol == 'B':
return TradeType.SELL
if symbol == 'C':
return TradeType.SETTLEMENT_BUY
if symbol == 'D':
return TradeType.SETTLEMENT_SELL
# else
raise DBUpgradeError(
f'Failed to deserialize trade type. Unknown DB symbol {symbol} for trade type in DB',
)
def v6_generate_trade_id(
location: Location,
time: Union[str, int],
trade_type: TradeType,
pair: str,
amount: str,
rate: str,
link: str,
) -> str:
"""We copy the identifier() property of a trade at v6
This is done in case the function ever changes in the future.
"""
source_str = (
str(location) +
str(time) +
str(trade_type) +
pair +
amount +
rate +
link
)
return sha3(source_str.encode()).hex()
def _upgrade_trades_table(db: 'DBHandler') -> None:
cursor = db.conn.cursor()
# This is the data we need from trades table at v6
query = cursor.execute(
"""SELECT time, location, pair, type, amount, rate, fee, fee_currency,
link, notes FROM trades;""",
)
trade_tuples = []
for result in query:
# for each trade get all the relevant data
time = result[0]
db_location = result[1]
pair = result[2]
db_trade_type = result[3]
amount = result[4]
rate = result[5]
fee = result[6]
fee_currency = result[7]
link = result[8]
notes = result[9]
# make sure to deserialize the db enums
location = v6_deserialize_location_from_db(db_location)
trade_type = v6_deserialize_trade_type_from_db(db_trade_type)
new_trade_id = v6_generate_trade_id(
location=location,
time=time,
trade_type=trade_type,
pair=pair,
amount=amount,
rate=rate,
link=link,
)
trade_tuples.append((
new_trade_id,
time,
db_location,
pair,
db_trade_type,
amount,
rate,
fee,
fee_currency,
link,
notes,
))
# We got all the external trades data. Now delete the old table and create
# the new one
cursor.execute('DROP TABLE trades;')
db.conn.commit()
# This is the scheme of the trades table at v7 from db/utils.py
cursor.execute("""
CREATE TABLE IF NOT EXISTS trades (
id TEXT PRIMARY KEY,
time INTEGER,
location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location),
pair VARCHAR[24],
type CHAR(1) NOT NULL DEFAULT ('B') REFERENCES trade_type(type),
amount TEXT,
rate TEXT,
fee TEXT,
fee_currency VARCHAR[10],
link TEXT,
notes TEXT
);""")
db.conn.commit()
# and finally move the data to the new table
cursor.executemany(
'INSERT INTO trades('
' id, '
' time,'
' location,'
' pair,'
' type,'
' amount,'
' rate,'
' fee,'
' fee_currency,'
' link,'
' notes)'
'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
trade_tuples,
)
db.conn.commit()
def upgrade_v6_to_v7(db: 'DBHandler') -> None:
"""Upgrades the DB from v6 to v7
- upgrades trades table to use a new id scheme
"""
_upgrade_trades_table(db)
the-stack_106_25671
from model.film import Film
from model.user import User
#from selenium_fixture import app
def test_add_film(app):
new_film = Film.unic_name()
app.ensure_login_as(User.Admin())
app.add_film(new_film)
assert app.is_film_created(new_film)
the-stack_106_25672
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentDSOX92004A(agilent90000):
"Agilent Infiniium DSOX92004A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSOX92004A')
super(agilentDSOX92004A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 20e9
self._init_channels()
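# Illustrative usage sketch (not from the original file; the VISA resource string is a
# placeholder and the call follows the usual python-ivi construction pattern):
#
#     scope = agilentDSOX92004A("TCPIP0::192.0.2.10::INSTR")
#     print(scope._analog_channel_count)   # 4 analog channels, 20 GHz bandwidth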
the-stack_106_25674
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for combinator layers."""
from absl.testing import absltest
import numpy as np
from trax import shapes
import trax.layers as tl
def DivideBy(val): # pylint: disable=invalid-name
"""Returns a simple division layer with n_in == 1 and n_out == 1."""
return tl.Fn('DivideBy', lambda x: x / val)
# TODO(jonni): Consider a more generic home for this utility function.
def as_list(outputs):
"""Converts layer outputs to a nested list, for easier equality testing.
Args:
outputs: A tensor or tuple/list of tensors coming from the forward
application of a layer. Each tensor is NumPy ndarray-like, which
complicates simple equality testing (e.g., via `assertEquals`):
such tensors require equality testing to use either `all` (all
elements match) or `any` (at least one element matches), which is not
directly supported in absltest.
Returns:
A nested list structure containing all the output values, but now directly
testable using `assertEquals`.
"""
if isinstance(outputs, (list, tuple)):
return [y.tolist() for y in outputs]
else:
return outputs.tolist()
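# Illustrative sketch (not part of the original tests): per the docstring above,
# as_list((np.array([1, 2]), np.array([3, 4]))) returns [[1, 2], [3, 4]], which
# assertEqual can compare directly without resorting to `all`/`any`.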
class SerialTest(absltest.TestCase):
def test_none_is_no_op(self):
layer = tl.Serial(None)
xs = [np.array([1, 2, 3, 4]),
np.array([10, 20, 30])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3, 4],
[10, 20, 30]])
def test_empty_list_is_no_op(self):
layer = tl.Serial([])
xs = [np.array([1, 2, 3, 4]),
np.array([10, 20, 30])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3, 4],
[10, 20, 30]])
def test_one_in_one_out(self):
layer = tl.Serial(DivideBy(3))
x = np.array([3, 6, 9, 12])
y = layer(x)
self.assertEqual(as_list(y), [1, 2, 3, 4])
def test_div_div(self):
layer = tl.Serial(DivideBy(2.0), DivideBy(5.0))
x = np.array([10, 20, 30])
y = layer(x)
self.assertEqual(as_list(y), [1, 2, 3])
def test_dup_dup(self):
layer = tl.Serial(tl.Dup(), tl.Dup())
x = np.array([1, 2, 3])
ys = layer(x)
self.assertEqual(as_list(ys), [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
def test_default_name(self):
layer = tl.Serial(tl.Dup(), tl.Dup())
self.assertIn('Serial', str(layer))
def test_custom_name(self):
layer = tl.Serial(tl.Dup(), tl.Dup(), name='Branch')
self.assertIn('Branch', str(layer))
def test_weights(self):
model = tl.Serial(tl.Dense(4), tl.Dense(5), tl.Dense(7))
self.assertIsInstance(model.weights, tuple)
self.assertLen(model.weights, 3)
def test_flat_weights_and_state(self):
model = tl.Serial(tl.Dup(), tl.Dense(5), tl.Serial(tl.Dense(7), tl.Dup()))
sample_input_signature = shapes.signature(np.zeros((2, 3)))
model.init(sample_input_signature)
flat_weights, flat_state = tl.flatten_weights_and_state(
model.weights, model.state)
# Model has 2 pairs of trainable weights: (w, b) for the 2 dense layers.
# So after making them flat, there are 4 trainable weights.
self.assertLen(flat_weights, 4)
self.assertEmpty(flat_state)
model2 = tl.Serial(tl.Dense(5), tl.Dup(), tl.Dense(7))
sig = model2.weights_and_state_signature(sample_input_signature)
weights2, state2 = tl.unflatten_weights_and_state(
flat_weights, flat_state, sig)
model2.weights = weights2
model2.state = state2
self.assertLen(model2.weights, 3)
self.assertEqual(model.weights[1], model2.weights[0])
self.assertEqual(model.weights[2][0], model2.weights[2])
def test_shared_weights(self):
layer = tl.Dense(5)
model = tl.Serial(layer, layer)
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_nested(self):
layer = tl.Dense(5)
model = tl.Serial(layer, tl.Serial(layer))
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1][0], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_double_nested(self):
layer = tl.Dense(5)
model = tl.Serial(tl.Serial(layer), tl.Serial(layer))
sample_input = np.array([1, 2, 3, 4, 5])
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1][0], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_for_shared_serial(self):
layer = tl.Serial(tl.Dense(5), tl.Dense(5))
model = tl.Serial(layer, layer)
sample_input = np.array([1, 2, 3, 4, 5])
# Init gives weights reflecting weight sharing.
weights, _ = model.init(shapes.signature(sample_input))
self.assertIsNot(weights[0], tl.GET_WEIGHTS_FROM_CACHE)
self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
# Forward pass runs successfully.
y = model(sample_input)
self.assertEqual(y.shape, (5,))
def test_state(self):
model = tl.Serial(tl.Dense(4), tl.Dense(5), tl.Dense(7))
self.assertIsInstance(model.state, tuple)
self.assertLen(model.state, 3)
def test_set_rng_recurse_two_levels(self):
dense_00 = tl.Dense(2)
dense_01 = tl.Dense(2)
dense_10 = tl.Dense(2)
dense_11 = tl.Dense(2)
layer = tl.Serial(
tl.Serial(dense_00, dense_01),
tl.Serial(dense_10, dense_11),
)
input_signature = shapes.ShapeDtype((1, 2))
_, _ = layer.init(input_signature)
weights = layer.weights
dense_00_w, dense_00_b = weights[0][0]
dense_01_w, dense_01_b = weights[0][1]
dense_10_w, dense_10_b = weights[1][0]
dense_11_w, dense_11_b = weights[1][1]
# Setting rng's recursively during init should yield differing weights.
self.assertFalse(np.array_equal(dense_00_w, dense_01_w))
self.assertFalse(np.array_equal(dense_00_b, dense_01_b))
self.assertFalse(np.array_equal(dense_10_w, dense_11_w))
self.assertFalse(np.array_equal(dense_10_b, dense_11_b))
class ParallelTest(absltest.TestCase):
def test_dup_dup(self):
layer = tl.Parallel(tl.Dup(), tl.Dup())
xs = [np.array([1, 2, 3]),
np.array([10, 20])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3],
[1, 2, 3],
[10, 20],
[10, 20]])
def test_div_div(self):
layer = tl.Parallel(DivideBy(0.5), DivideBy(3.0))
xs = [np.array([1, 2, 3]),
np.array([30, 60])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[2, 4, 6],
[10, 20]])
def test_two_no_ops(self):
layer = tl.Parallel([], None)
xs = [np.array([1, 2, 3]),
np.array([10, 20])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3],
[10, 20]])
def test_default_name(self):
layer = tl.Parallel(tl.Dup(), tl.Dup())
self.assertIn('Parallel', str(layer))
def test_custom_name(self):
layer = tl.Parallel(tl.Dup(), tl.Dup(), name='DupDup')
self.assertIn('DupDup', str(layer))
def test_weights(self):
model = tl.Parallel(tl.Dense(3), tl.Dense(5))
self.assertIsInstance(model.weights, tuple)
self.assertLen(model.weights, 2)
def test_shared_weights(self):
layer = tl.Dense(5)
model = tl.Parallel(layer, layer)
sample_input = (np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]))
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_nested(self):
layer = tl.Dense(5)
model = tl.Parallel([layer, tl.Dense(2)],
[layer, tl.Dense(2)])
sample_input = (np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]))
weights, _ = model.init(shapes.signature(sample_input))
self.assertIs(weights[1][0], tl.GET_WEIGHTS_FROM_CACHE)
def test_shared_weights_for_shared_parallel(self):
layer = tl.Parallel(tl.Dense(5), tl.Dense(7))
model = tl.Parallel(layer, layer)
sample_input = [
np.array([1, 2, 3]),
np.array([10, 20, 30]),
np.array([100, 200, 300]),
np.array([1000, 2000, 3000]),
]
# Init gives weights reflecting weight sharing.
weights, _ = model.init(shapes.signature(sample_input))
self.assertIsNot(weights[0], tl.GET_WEIGHTS_FROM_CACHE)
self.assertIs(weights[1], tl.GET_WEIGHTS_FROM_CACHE)
# Forward pass runs successfully.
y0, y1, y2, y3 = model(sample_input)
self.assertEqual(y0.shape, (5,))
self.assertEqual(y1.shape, (7,))
self.assertEqual(y2.shape, (5,))
self.assertEqual(y3.shape, (7,))
def test_state(self):
model = tl.Parallel(tl.Dense(3), tl.Dense(5))
self.assertIsInstance(model.state, tuple)
self.assertLen(model.state, 2)
class ConcatenateTest(absltest.TestCase):
def test_n_in_n_out(self):
layer = tl.Concatenate()
self.assertEqual(layer.n_in, 2)
self.assertEqual(layer.n_out, 1)
def test_with_defaults(self):
layer = tl.Concatenate() # Default n_items=2, axis=-1
xs = [np.array([[1, 2, 3],
[4, 5, 6]]),
np.array([[10, 20, 30],
[40, 50, 60]])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[1, 2, 3, 10, 20, 30],
[4, 5, 6, 40, 50, 60]])
def test_axis_0(self):
layer = tl.Concatenate(axis=0)
xs = [np.array([[1, 2, 3],
[4, 5, 6]]),
np.array([[10, 20, 30],
[40, 50, 60]])]
y = layer(xs)
self.assertEqual(as_list(y), [[1, 2, 3],
[4, 5, 6],
[10, 20, 30],
[40, 50, 60]])
def test_axis_1(self):
layer = tl.Concatenate(axis=1)
xs = [np.array([[1, 2, 3],
[4, 5, 6]]),
np.array([[10, 20, 30],
[40, 50, 60]])]
y = layer(xs)
self.assertEqual(as_list(y), [[1, 2, 3, 10, 20, 30],
[4, 5, 6, 40, 50, 60]])
def test_n_items_is_not_default(self):
layer = tl.Concatenate(n_items=3)
xs = [np.array([[1, 2, 3],
[4, 5, 6]]),
np.array([[10, 20, 30],
[40, 50, 60]]),
np.array([[100, 200, 300],
[400, 500, 600]])]
y = layer(xs)
self.assertEqual(y.shape, (2, 9))
self.assertEqual(as_list(y), [[1, 2, 3, 10, 20, 30, 100, 200, 300],
[4, 5, 6, 40, 50, 60, 400, 500, 600]])
def test_repr(self):
layer = tl.Concatenate()
self.assertEqual(repr(layer), 'Concatenate_in2')
layer = tl.Concatenate(axis=0)
self.assertEqual(repr(layer), 'Concatenate_axis0_in2')
layer = tl.Concatenate(axis=1)
self.assertEqual(repr(layer), 'Concatenate_axis1_in2')
layer = tl.Concatenate(n_items=3)
self.assertEqual(repr(layer), 'Concatenate_in3')
class BranchTest(absltest.TestCase):
def test_noop_dup(self):
layer = tl.Branch([], tl.Dup())
x = np.array([1, 2, 3])
ys = layer(x)
self.assertEqual(as_list(ys), [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
def test_add_div(self):
layer = tl.Branch(tl.Add(), DivideBy(0.5))
xs = [np.array([1, 2, 3]),
np.array([10, 20, 30])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[11, 22, 33],
[2, 4, 6]])
def test_one_sublayer(self):
layer = tl.Branch(DivideBy(0.5))
x = np.array([1, 2, 3])
ys = layer(x)
self.assertEqual(as_list(ys), [2, 4, 6])
def test_default_name(self):
layer = tl.Branch(tl.Add(), DivideBy(0.5))
self.assertIn('Branch', str(layer))
class SelectTest(absltest.TestCase):
def test_computes_n_in(self):
layer = tl.Select([0, 0])
self.assertEqual(layer.n_in, 1)
layer = tl.Select([1, 0])
self.assertEqual(layer.n_in, 2)
layer = tl.Select([2])
self.assertEqual(layer.n_in, 3)
def test_given_n_in(self):
layer = tl.Select([0], n_in=2)
self.assertEqual(layer.n_in, 2)
layer = tl.Select([0], n_in=3)
self.assertEqual(layer.n_in, 3)
def test_first_of_3(self):
layer = tl.Select([0], n_in=3)
xs = [np.array([1, 2, 3]),
np.array([10, 20]),
np.array([100])]
y = layer(xs)
self.assertEqual(as_list(y), [1, 2, 3])
def test_second_of_3(self):
layer = tl.Select([1], n_in=3)
xs = [np.array([1, 2, 3]),
np.array([10, 20]),
np.array([100])]
y = layer(xs)
self.assertEqual(as_list(y), [10, 20])
class DropTest(absltest.TestCase):
def test_drop(self):
layer = tl.Drop()
x = np.array([1, 2, 3])
y = layer(x)
self.assertEqual(as_list(y), [])
class SwapTest(absltest.TestCase):
def test_swap(self):
layer = tl.Swap()
xs = [np.array([1, 2, 3]),
np.array([10, 20, 30])]
ys = layer(xs)
self.assertEqual(as_list(ys), [[10, 20, 30],
[1, 2, 3]])
class SerialWithSideOutputsTest(absltest.TestCase):
def test_serial_with_side_outputs_div_div(self):
def some_layer():
return tl.Parallel(DivideBy(2.0), DivideBy(5.0))
layer = tl.SerialWithSideOutputs([some_layer(), some_layer()])
xs = (np.array([1, 2, 3]),
np.array([10, 20, 30, 40, 50]),
np.array([100, 200]))
ys = layer(xs)
output_shapes = [y.shape for y in ys]
self.assertEqual(output_shapes, [(3,), (5,), (2,)])
class ScanTest(absltest.TestCase):
def _AddWithCarry(self): # pylint: disable=invalid-name
del self
def f(x, carry):
res = x + carry
return res, res # output and carry are the same
return tl.Fn('AddWithCarry', f, n_out=2)
def test_default_axis(self):
layer = tl.Scan(self._AddWithCarry())
xs = [
np.array([[0, 1, 2, 3],
[0, 10, 20, 30],
[0, 100, 200, 300]]),
np.array([9000, 8000, 7000, 6000])
]
ys = layer(xs)
self.assertEqual(as_list(ys),
[[[9000, 8001, 7002, 6003],
[9000, 8011, 7022, 6033],
[9000, 8111, 7222, 6333]
],
[9000, 8111, 7222, 6333]
])
def test_axis_1(self):
layer = tl.Scan(self._AddWithCarry(), axis=1)
xs = [
np.array([[0, 1, 2, 3],
[0, 10, 20, 30],
[0, 100, 200, 300]]),
np.array([9000,
8000,
7000])
]
ys = layer(xs)
self.assertEqual(as_list(ys),
[[[9000, 9001, 9003, 9006],
[8000, 8010, 8030, 8060],
[7000, 7100, 7300, 7600]
],
[9006,
8060,
7600]
])
def test_multi_input(self):
def _MultiInputFn(): # pylint: disable=invalid-name
def f(a, b, carry):
return a + b, b, carry + 1
return tl.Fn('MultiInputFn', f, n_out=2)
layer = tl.Scan(_MultiInputFn(), axis=1)
xs = [
np.array([[0, 1, 2],
[0, 10, 20]]),
np.array([[4, 5, 6],
[40, 50, 60]]),
np.array([9000,
8000])
]
ys = layer(xs)
self.assertEqual(as_list(ys),
[[[4, 6, 8],
[40, 60, 80]],
[[4, 5, 6],
[40, 50, 60]],
[9003,
8003]
])
def test_no_carry(self):
def _AddOne(): # pylint: disable=invalid-name
return tl.Fn('AddOne', lambda x: x + 1)
layer = tl.Scan(_AddOne(), n_carry=0)
x = np.array([[1, 3, 7],
[10, 30, 70]])
y = layer(x)
self.assertEqual(as_list(y), [[2, 4, 8],
[11, 31, 71]])
class BatchLeadingAxesTest(absltest.TestCase):
def _Id3Dim(self): # pylint: disable=invalid-name
del self
def f(x):
assert len(x.shape) == 3
return x
return tl.Fn('Id3Dim', f, n_out=2)
def test_2axes(self):
layer = tl.BatchLeadingAxes(self._Id3Dim(), n_last_axes_to_keep=2)
ys = layer(np.zeros((3, 4, 5)))
self.assertEqual(ys.shape, (3, 4, 5))
ys = layer(np.zeros((2, 3, 4, 5)))
self.assertEqual(ys.shape, (2, 3, 4, 5))
ys = layer(np.zeros((1, 2, 3, 4, 5)))
self.assertEqual(ys.shape, (1, 2, 3, 4, 5))
if __name__ == '__main__':
absltest.main()
the-stack_106_25676
from typing import Optional
from aws_xray_sdk import global_sdk_config
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core.async_recorder import AsyncSubsegmentContextManager
from aws_xray_sdk.core.models.dummy_entities import DummySegment
from aws_xray_sdk.core.models.subsegment import (
SubsegmentContextManager,
subsegment_decorator,
is_already_recording,
)
from aws_xray_sdk.core.exceptions.exceptions import SegmentNotFoundException
from aws_xray_sdk.core.exceptions import exceptions
from incendiary.loggers import error_logger
CAPTURE_WARNING = (
"[INCENDIARY] Incendiary has NOT been initialized for capture. "
"Refer to README for more information: {name}"
)
class IncendiaryAsyncSubsegmentContextManager(AsyncSubsegmentContextManager):
"""
A context manager that starts and ends a segment.
"""
def __init__(self, instance, *args, **kwargs):
self.instance = instance
super(IncendiaryAsyncSubsegmentContextManager, self).__init__(
*args, **kwargs
)
@subsegment_decorator
async def __call__(self, wrapped, instance, args, kwargs):
if is_already_recording(wrapped):
# The wrapped function is already decorated, the subsegment will be created later,
# just return the result
return await wrapped(*args, **kwargs)
func_name = self.name
if not func_name:
func_name = wrapped.__name__
if not global_sdk_config.sdk_enabled() or self.instance.app is None:
try:
segment = self.recorder.current_segment()
except SegmentNotFoundException:
segment = DummySegment(func_name)
self.recorder.context.put_segment(segment)
finally:
if segment is None:
error_logger.warning(CAPTURE_WARNING.format(name=func_name))
elif (
hasattr(self.instance.app, "initialized_plugins")
and "incendiary"
not in self.instance.app.initialized_plugins
):
error_logger.warning(CAPTURE_WARNING.format(name=func_name))
try:
return await self.recorder.record_subsegment_async(
wrapped,
instance,
args,
kwargs,
name=func_name,
namespace="local",
meta_processor=None,
)
except exceptions.AlreadyEndedException:
return await wrapped(*args, **kwargs)
class CaptureMixin:
@classmethod
def capture_async(
cls, name: Optional[str] = None
) -> IncendiaryAsyncSubsegmentContextManager:
"""
A decorator that records enclosed function or method
in a subsegment. It only works with asynchronous functions.
:param name: The name of the subsegment. If not specified, the function name will be used.
"""
return IncendiaryAsyncSubsegmentContextManager(
cls, xray_recorder, name=name
)
@classmethod
def capture(cls, name: Optional[str] = None) -> SubsegmentContextManager:
"""
A decorator that records decorated callable in a subsegment.
:param name: The name of the subsegment. If not specified the function name will be used.
"""
return SubsegmentContextManager(xray_recorder, name=name)
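# Illustrative usage sketch (hypothetical handler names; assumes an application-level
# plugin class that mixes in CaptureMixin, which is how this module is meant to be used):
#
#     @MyIncendiaryPlugin.capture_async("fetch-user")
#     async def fetch_user(user_id):
#         ...
#
#     @MyIncendiaryPlugin.capture("parse-payload")
#     def parse_payload(raw):
#         ...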
#
the-stack_106_25677
from sklearn import svm, grid_search, datasets
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
# Use spark_sklearn’s grid search instead:
from spark_sklearn import GridSearchCV
iris = datasets.load_iris()
param_grid = {"max_depth": [3, None],
"max_features": [1, 2, 4],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"n_estimators": [30, 70, 120]}
gs = grid_search.GridSearchCV(RandomForestClassifier(), param_grid=param_grid, verbose=1)
gs.fit(iris.data, iris.target)
print(gs.best_params_)
# Save out the best model
joblib.dump(gs.best_estimator_, 'iris.pkl')
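# Note (illustrative, assuming a live SparkContext named `sc`): the spark_sklearn
# GridSearchCV imported above takes the context as its first argument, e.g.
#     gs = GridSearchCV(sc, RandomForestClassifier(), param_grid=param_grid, verbose=1)
# whereas the call above still goes through scikit-learn's deprecated grid_search module.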
the-stack_106_25679
"""
OCCAM
Copyright (c) 2011-2017, SRI International
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of SRI International nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
import os
import re
import sys
import shutil
import logging
from . import provenance
from . import config
def checkOccamLib():
occamlib = config.get_occamlib_path()
if occamlib is None or not os.path.exists(occamlib):
sys.stderr.write('The occam library was not found. RTFM.\n')
return False
return True
def get_flag(flags, flag, default=None):
for (x, y) in flags:
if x == '--{0}'.format(flag):
return y
return default
def get_work_dir(flags):
d = get_flag(flags, 'work-dir')
if d is None:
return os.getcwd()
return os.path.abspath(d)
def get_whitelist(flags):
wl = get_flag(flags, 'keep-external')
if wl is not None:
return os.path.abspath(wl)
return None
def get_amalgamation(flags):
amalg = get_flag(flags, 'amalgamate')
if amalg is not None:
return os.path.abspath(amalg)
return None
def get_manifest(args):
manifest = None
if not args:
sys.stderr.write('\nNo manifest file specified\n\n')
return manifest
try:
manifest_file = args[0]
if not os.path.exists(manifest_file):
sys.stderr.write('\nManifest file {0} not found\n\n'.format(manifest_file))
elif not os.path.isfile(manifest_file):
sys.stderr.write('\nManifest file {0} not a file\n\n'.format(manifest_file))
else:
manifest = json.load(open(manifest_file, 'r'))
except Exception:
sys.stderr.write('\nReading and parsing the manifest file {0} failed\n\n'.format(args[0]))
return manifest
def make_work_dir(d):
if not os.path.exists(d):
sys.stderr.write('making working directory... "{0}"\n'.format(d))
os.mkdir(d)
if not os.path.isdir(d):
sys.stderr.write('working directory "{0}" is not a directory\n'.format(d))
return False
return True
def sanity_check_manifest(manifest):
""" Nurse maid the users.
"""
manifest_keys = ['ldflags', 'static_args', 'name', 'native_libs', 'binary', 'modules']
old_manifest_keys = ['modules', 'libs', 'search', 'shared']
new_manifest_keys = ['main', 'binary', 'dynamic_args', 'lib_spec', 'main_spec']
dodo_manifest_keys = ['watch']
replaces = {'modules': 'main', 'libs': 'modules', 'search': 'ldflags'}
warnings = [False]
def cr(warnings):
""" I like my warnings to stand out.
"""
if not warnings[0]:
warnings[0] = True
sys.stderr.write('\n')
if manifest is None:
sys.stderr.write('\nManifest is None.\n')
return False
if not isinstance(manifest, dict):
sys.stderr.write('\nManifest is not a dictionary: {0}.\n'.format(type(manifest)))
return False
for key in manifest:
if key in manifest_keys:
continue
if key in dodo_manifest_keys:
cr(warnings)
sys.stderr.write('Warning: "{0}" is no longer supported; ignoring.\n'.format(key))
continue
if key in old_manifest_keys:
cr(warnings)
sys.stderr.write('Warning: old style key "{0}" is DEPRECATED, use {1}.\n'.format(key, replaces[key]), )
continue
if not key in new_manifest_keys:
cr(warnings)
sys.stderr.write('Warning: "{0}" is not a recognized key; ignoring.\n'.format(key))
continue
return True
def get_int(n):
if n is None:
return 0
elif isinstance(n, int) or isinstance(n, unicode):
return n
elif isinstance(n, str):
try:
return int(n)
except ValueError:
pass
return None
def check_manifest(manifest):
ok = sanity_check_manifest(manifest)
if not ok:
return (False, )
main = manifest.get('main')
if main is None:
sys.stderr.write('No modules in manifest\n')
return (False, )
binary = manifest.get('binary')
if binary is None:
sys.stderr.write('No binary in manifest\n')
return (False, )
modules = manifest.get('modules')
if modules is None:
sys.stderr.write('No libs in manifest\n')
modules = []
native_libs = manifest.get('native_libs')
if native_libs is None:
native_libs = []
ldflags = manifest.get('ldflags')
if ldflags is None:
ldflags = []
static_args = manifest.get('static_args')
dynamic_args = manifest.get('dynamic_args')
dynamic_args = get_int(dynamic_args)
if dynamic_args is None:
sys.stderr.write('Field dynamic_args in manifest must be a int or string representing a int\n')
return (False, )
name = manifest.get('name')
if name is None:
sys.stderr.write('No name in manifest\n')
return (False, )
lib_spec = manifest.get('lib_spec')
if lib_spec is None:
lib_spec = []
main_spec = manifest.get('main_spec')
if main_spec is None:
main_spec = []
return (True, main, binary, modules, native_libs, ldflags, static_args, name, dynamic_args, \
lib_spec, main_spec)
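# Illustrative manifest sketch (hypothetical file names and values, matching the keys
# read by check_manifest above):
#
# {
#   "main": "app.bc",
#   "binary": "app_slashed",
#   "modules": ["libfoo.bc"],
#   "native_libs": ["-lm"],
#   "ldflags": ["-O2"],
#   "static_args": ["--flag"],
#   "dynamic_args": "0",
#   "name": "app",
#   "lib_spec": [],
#   "main_spec": []
# }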
#iam: used to be just os.path.basename; but now when we are processing trees
# the leaf names are not necessarily unique.
def prevent_collisions(x):
folders = []
path = x
while 1:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
if path == "" or path == os.sep:
break
folders.reverse()
return "_".join(folders)
bit_code_pattern = re.compile(r'\.bc$', re.IGNORECASE)
def populate_work_dir(module, libs, lib_spec, main_spec, work_dir):
files = {}
for x in [module] + libs + lib_spec + main_spec :
if bit_code_pattern.search(x):
bn = prevent_collisions(x)
target = os.path.join(work_dir, bn)
if os.path.abspath(x) != target:
shutil.copyfile(x, target)
idx = target.rfind('.bc')
files[x] = provenance.FileStream(target[:idx], 'bc')
else:
sys.stderr.write('Ignoring {0}\n'.format(x))
return files
def makeLogfile(logfile):
if not os.path.exists(logfile):
_, path_filename = os.path.splitdrive(logfile)
path, _ = os.path.split(path_filename)
if not os.path.exists(path):
os.mkdir(path)
def setLogger():
logfile = config.get_logfile()
logger = logging.getLogger()
makeLogfile(os.path.realpath(logfile))
hdlr = logging.FileHandler(logfile)
hdlr.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(hdlr)
levels = {'CRITICAL' : logging.CRITICAL,
'ERROR' : logging.ERROR,
'WARNING' : logging.WARNING,
'INFO' : logging.INFO,
'DEBUG' : logging.DEBUG}
level = os.getenv('OCCAM_LOGLEVEL', None)
if level is not None:
level = levels[level]
if level is None:
level = logging.WARNING
logger.setLevel(level)
logger.info(">> %s\n", ' '.join(sys.argv))
def write_timestamp(msg):
import datetime
dt = datetime.datetime.now ().strftime ('%d/%m/%Y %H:%M:%S')
sys.stderr.write("[%s] %s...\n" % (dt, msg))
def is_exec (fpath):
if fpath == None: return False
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, _ = os.path.split(program)
if fpath:
if is_exec (program): return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exec (exe_file): return exe_file
return None
# seaopt is a customized version of LLVM opt that is more
# friendly to tools like crab and seahorn.
def found_seaopt():
opt = which('seaopt')
if opt is not None:
return True
else:
return False
def get_opt(use_seaopt = False):
opt = None
if use_seaopt:
opt = which('seaopt')
if opt is None:
opt = config.get_llvm_tool('opt')
if opt is None:
raise IOError('opt was not found')
return opt
# Try to find ROPgadget binary
def get_ropgadget():
ropgadget = None
if 'ROPGADGET' in os.environ: ropgadget = os.environ ['ROPGADGET']
if not is_exec(ropgadget): ropgadget = which('ropgadget')
if not is_exec(ropgadget): ropgadget = which('ROPgadget.py')
return ropgadget
# Try to find seahorn binary
def get_seahorn():
seahorn = None
if 'SEAHORN' in os.environ: seahorn = os.environ ['SEAHORN']
if not is_exec(seahorn): seahorn = which('sea')
return seahorn
# Try to find clam binary
def get_clam():
clam = None
if 'CLAM' in os.environ: clam = os.environ ['CLAM']
if not is_exec(clam): clam = which('clam')
return clam
the-stack_106_25680
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Query_Dop parallel query
Case Name : Create a range-partitioned table, set query_dop=2, run explain on a simple query (no group by, no join), and check whether parallel query is enabled
Description :
1. Create a range-partitioned table
2. Insert data into the partitioned table
3. Set the query_dop parameter to 2
4. Restart the database cluster so the parameter takes effect
5. Run analyze on the partitioned table
6. Use explain to check whether the partitioned table uses parallel query
7. Clean up the environment
Expect :
1. Range-partitioned table created successfully
2. Data inserted successfully
3. query_dop parameter set to 2 successfully
4. Database cluster restarted successfully
5. analyze on the partitioned table succeeds
6. explain on the partitioned table shows parallel query enabled
7. Environment cleaned up successfully
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class QueryDopCase(unittest.TestCase):
def setUp(self):
self.logger = Logger()
self.logger.info(
"---Opengauss_Function_Tools_Query_Dop_Case0012开始执行---")
self.constant = Constant()
self.commonsh = CommonSH("PrimaryDbUser")
self.common = Common()
self.initial_query_dop = self.common.show_param("query_dop")
self.t_name = "t_score_0012"
def test_query_dop(self):
step1_text = "---step1:创建范围分区表;expect:建表成功---"
self.logger.info(step1_text)
sql_cmd1 = f'''drop table if exists {self.t_name};
create table {self.t_name}(
col_id int,
col_name char(30),
col_score int,
col_course char(30))
partition by range (col_score)
(partition t_score_0012_p1
values less than (60),
partition t_score_0012_p2
values less than (80),
partition t_score_0012_p3
values less than (1000001));'''
self.logger.info(sql_cmd1)
sql_res1 = self.commonsh.execut_db_sql(sql_cmd1)
self.logger.info(sql_res1)
self.assertIn("CREATE TABLE", sql_res1, "执行失败" + step1_text)
step2_text = "---step2:为范围分区表插入数据;expect:插入数据成功---"
sql_cmd2 = f'''insert into {self.t_name} values(
generate_series(1, 1000000),
'name',
generate_series(1, 1000000),
'course');'''
self.logger.info(sql_cmd2)
sql_res2 = self.commonsh.execut_db_sql(sql_cmd2)
self.logger.info(sql_res2)
self.assertIn("INSERT", sql_res2, "执行失败:" + step2_text)
step3_text = "---step3:设置query_dop参数值为2;expect:设置成功---"
self.logger.info(step3_text)
guc_cmd = self.commonsh.execute_gsguc("set",
self.constant.GSGUC_SUCCESS_MSG,
"query_dop =2")
self.logger.info(guc_cmd)
self.assertTrue(guc_cmd, "Execution failed:" + step3_text)
step4_text = "---step4: restart the database cluster; expect: restart succeeds---"
self.logger.info(step4_text)
gs_cmd = self.commonsh.restart_db_cluster()
self.logger.info(gs_cmd)
self.logger.info("---检查数据库状态是否正常---")
status = self.commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
step5_text = "---step5:对分区表执行analyze操作;expect:操作成功---"
self.logger.info(step5_text)
analyse_res = self.commonsh.execut_db_sql(f'analyze {self.t_name}')
self.logger.info(analyse_res)
self.assertIn("ANALYZE", analyse_res, "执行失败:" + step5_text)
step6_text = "---step6:explain查看分区表是否启用并行查询;expect:并行查询启用成功---"
self.logger.info(step6_text)
explain_res = self.commonsh.execut_db_sql(
f'explain select count(*) from {self.t_name}')
self.logger.info(explain_res)
self.assertIn("Streaming(type: LOCAL GATHER dop: 1/2)",
explain_res, "执行失败:" + step6_text)
def tearDown(self):
self.logger.info("---清理环境---")
drop_text = "---删除分区表---"
self.logger.info(drop_text)
drop_cmd = f'drop table if exists {self.t_name};'
self.logger.info(drop_cmd)
drop_res = self.commonsh.execut_db_sql(drop_cmd)
self.logger.info(drop_res)
reset_text = "---重置query_dop参数值---"
self.logger.info(reset_text)
re_cmd = self.commonsh.execute_gsguc("set",
self.constant.GSGUC_SUCCESS_MSG,
f"query_dop = "
f"{self.initial_query_dop}")
self.logger.info(re_cmd)
restart_text = "---重启数据库集群---"
self.logger.info(restart_text)
restart_cmd = self.commonsh.restart_db_cluster()
self.logger.info(restart_cmd)
self.logger.info("---检查数据库状态是否正常---")
status_res = self.commonsh.get_db_cluster_status()
self.assertIn("DROP TABLE", drop_res, "执行失败" + drop_text)
self.assertTrue(re_cmd, "执行失败:" + reset_text)
self.assertTrue("Degraded" in status_res or "Normal" in status_res)
self.logger.info(
"---Opengauss_Function_Tools_Query_Dop_Case0012执行结束---")
the-stack_106_25681
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import unittest
import requests
from django.contrib.auth.models import User
from django.test import TestCase
from museum_site.common import *
from museum_site.constants import *
from museum_site.forms import *
from museum_site.models import *
from museum_site.templatetags.site_tags import *
class URLStatusTest(TestCase):
@unittest.skip("Skipping test_url_status()")
def test_url_status(self):
from museum_site.urls import urlpatterns
statuses = {200: True, 301: True, 302: True} # Valid statuses
expected_statuses = [200, 301, 302]
for p in urlpatterns:
# Special patterns
if str(p.pattern) == ".":
p.pattern = ""
elif str(p.pattern) == "zeta-live/":
p.pattern = "zeta-live/?pk=1015&world=DEMO.ZZT&start=7"
elif str(p.pattern) == "zeta-launcher/":
p.pattern = "zeta-live/?pk=1015&world=DEMO.ZZT&start=7"
url = HOST + str(p.pattern)
# Replace args
url = url.replace("<int:article_id>", "2")
url = url.replace("<int:page>", "1")
url = url.replace("<slug:slug>", "url-status-test-slug")
url = url.replace("<slug:category>", "closer-look")
url = url.replace("<str:letter>", "z")
url = url.replace("<str:filename>", "zzt.zip")
url = url.replace("<int:pk>", "1015")
url = url.replace("<int:user_id>", "1")
url = url.replace("<str:unused_slug>", "account-name")
url = url.replace("<int:series_id>", "1")
url = url.replace("<slug:slug>", "slug")
url = url.replace("<str:token>", "8A194C8A21493395")
# More oddball features that will likely be cut
url = url.replace("<int:phase>", "1")
url = url.replace("<str:section>", "play")
r = requests.head(url)
if r.status_code >= 400:
print(r.status_code, url, "[{}]".format(p.pattern))
statuses[r.status_code] = True
unique_statuses = list(statuses.keys())
unique_statuses.sort()
print(unique_statuses)
self.assertEqual(unique_statuses, expected_statuses)
class FileviewerStatusTest(TestCase):
def test_file_viewer_url_status(self):
qs = File.objects.all()
for f in qs:
print(f)
self.assertEqual(1 + 1, 2)
class MetaTagTest(TestCase):
def test_index(self):
valid = {}
expected = {"og:image": 1, "og:url": 1, "og:title": 1}
tags = meta_tags(path="/").split("\n")
for tag in tags:
if tag == (
'<meta property="og:image" '
'content="{}static/images/og_default.jpg">'.format(HOST)
):
valid["og:image"] = 1
elif tag == '<meta property="og:url" content="{}">'.format(HOST):
valid["og:url"] = 1
elif tag == '<meta property="og:title" content="Museum of ZZT">':
valid["og:title"] = 1
self.assertEqual(valid, expected)
def test_default(self):
valid = {}
expected = {"og:image": 1, "og:url": 1, "og:title": 1}
tags = meta_tags(path="/credits/").split("\n")
for tag in tags:
if tag == (
'<meta property="og:image" '
'content="{}static/images/og_default.jpg">'.format(HOST)
):
valid["og:image"] = 1
elif tag == '<meta property="og:url" content="{}credits/">'.format(
HOST
):
valid["og:url"] = 1
elif tag == '<meta property="og:title" content="Museum of ZZT">':
valid["og:title"] = 1
self.assertEqual(valid, expected)
class ZGameFormTest(TestCase):
def test_blank_form(self):
form = ZGameForm(data={})
errors = form.errors.get_json_data()
self.assertEqual(
list(errors.keys()),
["zfile", "title", "author", "genre", "language"]
)
class FileTest(TestCase):
@classmethod
def setUpTestData(cls):
File.objects.create(
filename="zzt.zip",
title="ZZT v3.2 (Registered)",
author="Tim Sweeney",
size=1234,
genre="Official/Puzzle/Adventure/Registered",
zeta_config=None,
)
File.objects.create(
filename="ZOMBINAT.ZIP",
title="The Zombinator",
author="Mazeo",
size=1234,
genre="Adventure",
zeta_config=None,
)
File.objects.create(
filename="thetamag1.zip",
title="ThetaMag #1",
author="Theta14",
size=1234,
genre="Adventure",
zeta_config=None,
)
def test_sorted_genre(self):
""" Genre should be sorted alphabetically after save """
f = File.objects.get(pk=1)
self.assertEqual(f.genre, "Adventure/Official/Puzzle/Registered")
def test_letter_identification(self):
letters = File.objects.filter(pk__lte=3).order_by("id").values_list(
"letter", flat=True
)
self.assertEqual(list(letters), ["z", "z", "t"])
class ArticleTest(TestCase):
@classmethod
def setUpTestData(cls):
Article.objects.create(
title='Test Article 1: "Hello World"',
)
class ReviewTest(TestCase):
@classmethod
def setUpTestData(cls):
info = [
("Alpha", "Alpha@localhost", "password123"),
("Beta", "Beta@localhost", "password123"),
("Gamma", "Gamma@localhost", "password123"),
]
for i in info:
u = User.objects.create_user(i[0], i[1], i[2])
Profile.objects.create(user=u, patron_email=u.email)
f = File.objects.create(
filename="zzt.zip",
title="ZZT v3.2 (Registered)",
author="Tim Sweeney",
size=1234,
genre="Official/Puzzle/Adventure/Registered",
zeta_config=None,
)
Review.objects.create(
zfile=f,
user_id=1,
title="Test Review Title Logged In User",
author="IGNORED BECAUSE LOGGED IN",
content="Body of *my* review",
rating=5.0,
date="2022-01-01",
ip="127.0.0.1"
),
Review.objects.create(
zfile=f,
user_id=None,
title="Test Review Title Anon User",
author="U.N. Owen",
content="Body of *my* review",
rating=5.0,
date="2022-01-01",
ip="127.0.0.1"
)
def test_review_author(self):
r = Review.objects.get(pk=1)
self.assertEqual(r.get_author(), "Alpha")
r = Review.objects.get(pk=2)
self.assertEqual(r.get_author(), "U.N. Owen")
"""
http://django.pi:8000/article/f/frost1.zip
http://django.pi:8000/article/406/livestream-frost-1-power
http://django.pi:8000/file/f/frost1.zip
http://django.pi:8000/patron-articles/
http://django.pi:8000/user/profile/1/dr_dos/
"""
the-stack_106_25684
# a basic script to run the case in this directory
import sys,os
from mpi4py import MPI
from pygeo import *
from pyspline import *
from idwarp import *
import numpy
gcomm = MPI.COMM_WORLD
meshOptions = {
'gridFile':os.getcwd(),
'fileType':'openFoam',
'symmetryPlanes':[[[0,0,0], [0,1,0]]],
'aExp':3,
'bExp':5,
'alpha':1.0,
'LdefFact':.20,
}
mesh = USMesh(options=meshOptions, comm=gcomm)
coords0 = mesh.getSurfaceCoordinates()
# setup FFD
FFDFile = './FFD/globalFFD.fmt'
DVGeo = DVGeometry(FFDFile)
# Setup curves for ref_axis
x = [-2.,0.,0.1,1.044,5.]
y = [0.1,0.1,0.1,0.1,0.1]
z = [0.1,0.1,0.1,0.1,0.1]
nLength = len(x)
c1 = pySpline.Curve(x=x, y=y, z=z, k=2)
DVGeo.addRefAxis('bodyAxis', curve = c1,axis='z')
DVGeoChild = DVGeometry('./FFD/bodyFittedFFD.fmt', child=True)
# Setup curves for ref_axis
x1 = [0.,0.1,0.862,1.044]
y1 = [0.1,0.1,0.1,0.1]
z1 = [0.194,0.194,0.194,0.13]
#z1 = [0.338,0.338,0.338,0.21]
#z1 = [0.338,0.338,0.338,0.338]
nLengthChild = len(x1)
c2 = pySpline.Curve(x=x1, y=y1, z=z1, k=2)
DVGeoChild.addRefAxis('localBodyAxis', curve = c2,axis='z')
def rampAngle(val,geo):
C = geo.extractCoef('localBodyAxis')
# the value will be ramp angle in degree.
# start with a conversion to rads
angle = (val[0])*numpy.pi/180.0
# Overall length needs to stay at 1.044, so use that as a ref for
# the final mesh point
# set the target length
lTarget = 0.222
hInit = 0.21 - 0.05
# compute the coefficient deltas
dx = lTarget*numpy.cos(angle)
dz = (lTarget*numpy.sin(angle))
topEdge = 0.338-dz
rearHeight = topEdge-0.05
coefPoint = rearHeight/2.0 +0.05
scalez = rearHeight/hInit
# Set the coefficients
C[3,0] = 1.044
C[2,0] = C[3,0]-dx
C[2,2] = 0.194
C[3,2] = coefPoint
geo.restoreCoef(C, 'localBodyAxis')
geo.scale_z['localBodyAxis'].coef[3] = scalez
return
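# Worked example (approximate, illustrative only): for the default val = [35.1] degrees,
# angle ~ 0.613 rad, so dx = 0.222*cos(angle) ~ 0.182 and dz = 0.222*sin(angle) ~ 0.128.
# The rear control point then lands at x ~ 1.044 - 0.182 = 0.862 with z ~ 0.130, matching
# the initial reference-axis coefficients x1/z1 defined above.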
def doubleRampAngle(val,geo):
C = geo.extractCoef('localBodyAxis')
# the values will be the upper and lower ramp angle
# in degree. Start with a conversion to rads.
upperAngle = (val[0])*numpy.pi/180.0
lowerAngle = (val[1])*numpy.pi/180.0
# Overall length needs to stay at 1.044, so use that as a ref for
# the final mesh point
# set the target length
lTarget = 0.222
hInit = 0.21 - 0.05
# compute the coefficient deltas
dx = lTarget*numpy.cos(upperAngle)
dzUpper = (lTarget*numpy.sin(upperAngle))
dzLower = (lTarget*numpy.sin(lowerAngle))
topEdge = 0.338-dzUpper
rearHeight = topEdge-0.05+dzLower
coefPoint = rearHeight/2.0 +0.05-dzLower
scalez = numpy.sqrt((rearHeight/hInit)**2)
# Set the coefficients
C[3,0] = 1.044
C[2,0] = C[3,0]-dx
C[2,2] = 0.194
C[3,2] = coefPoint
geo.restoreCoef(C, 'localBodyAxis')
geo.scale_z['localBodyAxis'].coef[3] = scalez
return
def length(val, geo):
C = geo.extractCoef('bodyAxis')
for i in range(len(C)):
#print 'C',i,C[i,0],val[i]
C[i,0] = val[i]
# end
geo.restoreCoef(C, 'bodyAxis')
return
def angleVars(val, geo):
C = geo.extractCoef('localBodyAxis')
for i in range(len(C)):
C[i,2] = val[i]
# end
geo.restoreCoef(C, 'localBodyAxis')
def noseLength(val, geo):
C = geo.extractCoef('bodyAxis')
length = val[0]
currentLength = C[2,0]-C[1,0]
C[1,0] = C[2,0]-length
geo.restoreCoef(C, 'bodyAxis')
return
# lower = [-2.,-2.,-2.,-2.,5.]
# upper = [-2.,5.,5.,5.,5.]
# DVGeo.addGeoDVGlobal('length', x, length,
# lower=lower, upper=upper, scale=1.0)
DVGeo.addGeoDVGlobal('noseLength', 0.3, noseLength,
lower=0, upper=0.5, scale=1.0)
# DVGeoChild.addGeoDVGlobal('rampAngle', 35.1, rampAngle,
# lower=0., upper=90., scale=1.0)
DVGeoChild.addGeoDVGlobal('doubleRampAngle', [35.1,-5.], doubleRampAngle,
lower=[0.,-45.], upper=[45.,5.], scale=[1.0,1.0])
# lowerA = [0.,0.,0.,0.]
# upperA = [0.3,0.3,0.3,0.3]
# DVGeoChild.addGeoDVGlobal('angleVars', z1, angleVars,
# lower=lowerA, upper=upperA, scale=1.0)
# lowerL = [-1.,-1.,-1.,-1.]
# upperL = [2.0,2.0,2.0,2.0]
# DVGeoChild.addGeoDVGlobal('noseLen', x1, noseLength,
# lower=lowerL, upper=upperL, scale=1.0)
# DVGeo.addGeoDVGlobal('angleVars', z1, angleVars,
# lower=lowerA, upper=upperA, scale=1.0)
# Add the child to the parent
DVGeo.addChild(DVGeoChild)
ptSetName = 'allSurfs'
freezeDict = {}#'0':['jLow'],'1':['jLow'],'2':['jLow']}#'0':['jLow'],'1':['jHigh','jLow']}
DVGeo.addPointSet(coords0, ptSetName, faceFreeze = freezeDict)
xDV = DVGeo.getValues()
# Required design variables
# Rear ramp angle, fixed 200 mm length
# overall length
# nose length
# Ramp shape
# Ground separation
# Lower ramp angle
# Case 1: Rear Ramp angle, fixed length
# Case 2: Upper and lower ramp angles, fixed length
# Case 3: Nose length ( Do with global FFD)
# Case 4: Overall length ( Do with global FFD)
# Case 5: Shape
# xDV['length'][2] = 1.75#2.0#1.05
# xDV['angleVars'][2] = 0.15
# xDV['angleVars'][0] = 0.19
# xDV['noseLen'][0] = -0.1
# xDV['angleVars'][1] = 0.18
# xDV['angleVars'][2] = 0.18
# xDV['angleVars'][3] = 0.12
DVGeo.setDesignVars(xDV)
mesh.setSurfaceCoordinates(DVGeo.update(ptSetName))
mesh.warpMesh()
DVGeo.writeTecplot('warpedFFD.dat')
#mesh.writeOFGridTecplot('warped.dat')
mesh.writeGrid()
# # Repeat ================
# #xDV['length'][2] = 1.25#2.0#1.05
# xDV['angleVars'][2] = 0.3
# DVGeo.setDesignVars(xDV)
# #coords = DVGeo.update(ptSetName)
# # for i in range(coords0.shape[0]):
# # if coords0[i,1]==0:
# # print 'x',coords[i,:]
# # # end
# # # end
# # for i in range(coords.shape[0]):
# # print 'x',coords0[i,:],coords[i,:]
# # # print DVGeo.update(ptSetName)
# #sys.exit(0)
# mesh.setSurfaceCoordinates(DVGeo.update(ptSetName))
# DVGeo.writeTecplot('warpedFFD2.dat')
# mesh.warpMesh()
# #print 'mesh warped'
# mesh.writeOpenFOAMVolumePoints()
# #print 'points updated'
# meshName = os.path.join(os.getcwd(),"testAhmedMesh2")
# mesh.writeGridTecplot(meshName)
# #print 'file written'
|
the-stack_106_25686 | #!/usr/bin/env python3
import numpy as np
from scipy import spatial
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
class PoseDistance:
def __init__(self):
# Assumes unit sphere
self.mass_matrix = np.identity(6)
# for finding translations
self.pt_zero = np.zeros([4,1])
        self.pt_zero[3] = 1
# for finding rotation (3 vectors)
self.mat_rot_only = np.identity(4)
self.mat_rot_only[3,3] = 0
def set_mass_2D(self, span, depth):
self.mass_matrix[0,0] = 1/span
self.mass_matrix[1,1] = 1/depth
self.mass_matrix[2,2] = 2/np.pi
def set_mass_3D_sphere(self, radius ):
self.mass_matrix[0,0] = 1/radius
self.mass_matrix[1,1] = 1/radius
self.mass_matrix[2,2] = 2/np.pi
def get_translation(self, m1, m2):
pt_center1 = m1 @ self.pt_zero
pt_center2 = m2 @ self.pt_zero
return pt_center2 - pt_center1
def get_rotation(self, m1, m2):
m1_rot = m1 @ self.mat_rot_only
m2_rot = m2 @ self.mat_rot_only
# Make this a rotation from the origin to somewhere
m_rot = m2_rot[0:3,0:3] @ m1_rot[0:3,0:3].transpose()
m_rot = R.from_dcm( m_rot[0:3,0:3] )
# Extract as a quaternion
self.q = m_rot.as_quat()
return self.q
def get_time_derivative_body_frame(self, m1, m2):
m_body = np.linalg.inv(m1) @ m2
m_identity = np.identity(4)
d_trans = self.get_translation(m_identity, m_body)
d_quat = self.get_rotation(m_identity, m_body)
d_quat_R = R.from_quat(d_quat)
d_euc = d_quat_R.as_euler('xyz', degrees=False)
return [d_trans[0,0], d_trans[1,0], d_trans[2,0], d_euc[0], d_euc[1], d_euc[2]]
def distance_RT(self, m1, m2 ):
t = self.get_translation(m1, m2)
q = self.get_rotation(m1, m2)
# Euclidean distance of translation plus quaternion distance of rotation (angle rotated)
dist_T = spatial.distance.euclidean( [0,0,0], t[0:3] )
dist_R = 2.0 * np.arctan2( spatial.distance.euclidean(q[0:3], [0,0,0]), q[3] )
return dist_T + dist_R
def distance_LI(self, m1, m2 ):
return 0
def distance_RT_seq(self, ms):
if len(ms) < 2:
raise ValueError('ms needs to have at least two matrices\n')
d_sum = 0
for i in range(0,len(ms)-1):
time_deriv = self.get_time_derivative_body_frame(ms[i], ms[i+1])
d_step_cost = time_deriv @ self.mass_matrix @ np.transpose(time_deriv)
d_sum += d_step_cost
return d_sum
def get_sequence_RT(self, m1, m2, n = 10):
if n < 2:
n = 2
# Translation/rotation to m1
t_m1 = self.get_translation(np.identity(4), m1)
q_m1 = self.get_rotation(np.identity(4), m1)
# Translation/rotation to m2
t_m2 = self.get_translation(np.identity(4), m2)
q_m2 = self.get_rotation(np.identity(4), m2)
# For rotation interpolation
r1r2 = R.from_quat([q_m1, q_m2])
slerp = Slerp([0,1], r1r2)
# Our list of matrices to return
ms = []
# identity transforms/rotates - these will be filled in during loop
m_trans = np.identity(4)
m_rot = np.identity(4)
# linearly interpolate between 0 and 1
qs = slerp(np.linspace(0,1, n))
for i,dt in enumerate( np.linspace(0,1, n) ):
# move a bit
trans = (1-dt) * t_m1 + dt * t_m2
m_trans[0:3, 3] = np.transpose( trans[0:3] )
# Rotate by desired amount
m_rot[0:3,0:3] = qs[i].as_dcm()
# Move to m1
m_add = m_trans @ m_rot
ms.append( m_add )
return ms
def write_pt(m):
pt_zero = np.zeros([4, 1])
pt_zero[3] = 1
print( np.transpose( m @ pt_zero ) )
for i in range(0,3):
vec_zero = np.zeros([4,1])
vec_zero[i] = 1
print( np.transpose(m @ vec_zero) )
print('\n')
if __name__ == '__main__':
print('Checking Pose Distance Metric code\n')
m1 = np.identity(4)
d_ang = np.pi / 3
m1[0:3,3] = [1,1,1]
    m1[0:3,0:3] = R.from_euler('XYZ', [0, 0, d_ang]).as_dcm()
m2 = np.identity(4)
m2[0:3,3] = [2,2,2]
pd = PoseDistance()
dist_RT = pd.distance_RT(m1, m2)
dist_RT_seq = pd.distance_RT_seq( pd.get_sequence_RT(m1, m2, n = 10) )
print('Distance RT, should be {0:0.2f}, is {1:0.2f}, integrated {2:0.2f}\n'.format( np.sqrt(3) + d_ang, dist_RT, dist_RT_seq ) )
mat_seq2 = pd.get_sequence_RT(m1, m2, 2)
print('M1:')
write_pt(m1)
print('M2:')
write_pt(m2)
for i,m in enumerate(mat_seq2):
print('M{}\n'.format(i))
write_pt(m)
mat_seq4 = pd.get_sequence_RT(m1, m2, 8)
for i,m in enumerate(mat_seq4):
print('M{}\n'.format(i))
write_pt(m)
|
the-stack_106_25687 | import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="cmax", parent_name="scattersmith.marker", **kwargs):
super(CmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
**kwargs,
)
|
the-stack_106_25688 | from typing import Any, Dict, List, NamedTuple, Tuple, cast
from ee.clickhouse.client import substitute_params, sync_execute
from ee.clickhouse.models.action import format_action_filter
from ee.clickhouse.queries.person_distinct_id_query import get_team_distinct_ids_query
from ee.clickhouse.queries.retention.retention_actors import (
ClickhouseRetentionActors,
ClickhouseRetentionActorsByPeriod,
)
from ee.clickhouse.queries.retention.retention_event_query import RetentionEventsQuery
from ee.clickhouse.queries.util import get_trunc_func_ch
from ee.clickhouse.sql.retention.retention import (
INITIAL_BREAKDOWN_INTERVAL_SQL,
INITIAL_INTERVAL_SQL,
RETENTION_BREAKDOWN_SQL,
RETENTION_SQL,
)
from posthog.constants import (
RETENTION_FIRST_TIME,
TREND_FILTER_TYPE_ACTIONS,
TREND_FILTER_TYPE_EVENTS,
TRENDS_LINEAR,
RetentionQueryType,
)
from posthog.models.action import Action
from posthog.models.entity import Entity
from posthog.models.filters import RetentionFilter
from posthog.models.team import Team
from posthog.queries.retention import AppearanceRow, Retention
CohortKey = NamedTuple("CohortKey", (("breakdown_values", Tuple[str]), ("period", int)))
class ClickhouseRetention(Retention):
def _get_retention_by_cohort(self, filter: RetentionFilter, team: Team,) -> Dict[Tuple[int, int], Dict[str, Any]]:
period = filter.period
is_first_time_retention = filter.retention_type == RETENTION_FIRST_TIME
date_from = filter.date_from
trunc_func = get_trunc_func_ch(period)
returning_event_query, returning_event_params = RetentionEventsQuery(
filter=filter, team_id=team.pk, event_query_type=RetentionQueryType.RETURNING
).get_query()
target_event_query, target_event_params = RetentionEventsQuery(
filter=filter,
team_id=team.pk,
event_query_type=RetentionQueryType.TARGET_FIRST_TIME
if is_first_time_retention
else RetentionQueryType.TARGET,
).get_query()
all_params = {
"team_id": team.pk,
"start_date": date_from.strftime(
"%Y-%m-%d{}".format(" %H:%M:%S" if filter.period == "Hour" else " 00:00:00")
),
**returning_event_params,
**target_event_params,
"period": period,
}
result = sync_execute(
RETENTION_SQL.format(
returning_event_query=returning_event_query,
trunc_func=trunc_func,
target_event_query=target_event_query,
),
all_params,
)
initial_interval_result = sync_execute(
INITIAL_INTERVAL_SQL.format(reference_event_sql=target_event_query, trunc_func=trunc_func,), all_params,
)
result_dict = {}
for initial_res in initial_interval_result:
result_dict.update({(initial_res[0], 0): {"count": initial_res[1], "people": []}})
for res in result:
result_dict.update({(res[0], res[1]): {"count": res[2], "people": []}})
return result_dict
def _get_retention_by_breakdown_values(
self, filter: RetentionFilter, team: Team,
) -> Dict[CohortKey, Dict[str, Any]]:
period = filter.period
is_first_time_retention = filter.retention_type == RETENTION_FIRST_TIME
date_from = filter.date_from
trunc_func = get_trunc_func_ch(period)
returning_event_query_templated, returning_event_params = RetentionEventsQuery(
        filter=filter.with_data({"breakdowns": []}),  # Avoid pulling in breakdown values from returning event query
team_id=team.pk,
event_query_type=RetentionQueryType.RETURNING,
).get_query()
returning_event_query = substitute_params(returning_event_query_templated, returning_event_params)
target_event_query_templated, target_event_params = RetentionEventsQuery(
filter=filter,
team_id=team.pk,
event_query_type=(
RetentionQueryType.TARGET_FIRST_TIME if is_first_time_retention else RetentionQueryType.TARGET
),
).get_query()
target_event_query = substitute_params(target_event_query_templated, target_event_params)
all_params = {
"team_id": team.pk,
"start_date": date_from.strftime(
"%Y-%m-%d{}".format(" %H:%M:%S" if filter.period == "Hour" else " 00:00:00")
),
"total_intervals": filter.total_intervals,
"period": period.lower(),
"breakdown_by": filter.breakdown,
}
result = sync_execute(
substitute_params(RETENTION_BREAKDOWN_SQL, all_params).format(
returning_event_query=returning_event_query,
trunc_func=trunc_func,
target_event_query=target_event_query,
GET_TEAM_PERSON_DISTINCT_IDS=get_team_distinct_ids_query(team.pk),
)
)
result = [(tuple(res[0]), *res[1:]) for res in result] # make breakdown hashable, required later
initial_interval_result = sync_execute(
substitute_params(INITIAL_BREAKDOWN_INTERVAL_SQL, all_params).format(
reference_event_sql=target_event_query, trunc_func=trunc_func,
),
)
initial_interval_result = [
(tuple(res[0]), *res[1:]) for res in initial_interval_result
] # make breakdown hashable, required later
result_dict = {}
for initial_res in initial_interval_result:
result_dict.update({CohortKey(initial_res[0], 0): {"count": initial_res[1], "people": []}})
for res in result:
result_dict.update({CohortKey(res[0], res[1]): {"count": res[2], "people": []}})
return result_dict
def run(self, filter: RetentionFilter, team: Team, *args, **kwargs) -> List[Dict[str, Any]]:
if filter.display == TRENDS_LINEAR:
# If we get a display=TRENDS_LINEAR then don't do anything special
# with breakdowns. This code path will be removed anyway in a future
# change.
retention_by_cohort = self._get_retention_by_cohort(filter, team)
return self.process_graph_result(retention_by_cohort, filter)
if filter.breakdowns and filter.breakdown_type:
retention_by_breakdown = self._get_retention_by_breakdown_values(filter, team)
return self.process_breakdown_table_result(retention_by_breakdown, filter)
else:
# If we're not using breakdowns, just use the non-clickhouse
# `process_table_result`
retention_by_cohort = self._get_retention_by_cohort(filter, team)
return self.process_table_result(retention_by_cohort, filter)
def process_breakdown_table_result(
self, resultset: Dict[CohortKey, Dict[str, Any]], filter: RetentionFilter,
):
result = [
{
"values": [
resultset.get(CohortKey(breakdown_values, interval), {"count": 0, "people": []})
for interval in range(filter.total_intervals)
],
"label": "::".join(breakdown_values),
"breakdown_values": breakdown_values,
}
for breakdown_values in set(
cohort_key.breakdown_values for cohort_key in cast(Dict[CohortKey, Dict[str, Any]], resultset).keys()
)
]
return result
def _get_condition(self, target_entity: Entity, table: str, prepend: str = "") -> Tuple[str, Dict]:
if target_entity.type == TREND_FILTER_TYPE_ACTIONS:
action = Action.objects.get(pk=target_entity.id)
action_query, params = format_action_filter(action, prepend=prepend, use_loop=False)
condition = action_query
elif target_entity.type == TREND_FILTER_TYPE_EVENTS:
condition = "{}.event = %({}_event)s".format(table, prepend)
params = {"{}_event".format(prepend): target_entity.id}
else:
condition = "{}.event = %({}_event)s".format(table, prepend)
params = {"{}_event".format(prepend): "$pageview"}
return condition, params
def _retrieve_actors(self, filter: RetentionFilter, team: Team):
_, serialized_actors = ClickhouseRetentionActors(filter=filter, team=team).get_actors()
return serialized_actors
def _retrieve_actors_in_period(self, filter: RetentionFilter, team: Team):
query_builder = ClickhouseRetentionActorsByPeriod(filter=filter, team=team)
query, params = query_builder.actor_query()
# NOTE: I'm using `Any` here to avoid typing issues when trying to iterate.
query_result: Any = sync_execute(query, params)
actor_appearances = [
AppearanceRow(actor_id=row[0], appearance_count=row[1], appearances=row[2]) for row in query_result
]
_, serialized_actors = query_builder.get_actors_from_result(query_result)
actor_dict = {str(actor["id"]): actor for actor in serialized_actors}
# adjust total intervals to expected number of appearances based on selected interval
filter = filter.with_data({"total_intervals": filter.total_intervals - filter.selected_interval})
result = self.process_actors_in_period(
filter=filter, actor_appearances=actor_appearances, actor_dict=actor_dict
)
return result
|
the-stack_106_25690 | _base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='D2Det',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
# neck=dict(
# type='FPN',
# in_channels=[256, 512, 1024, 2048],
# out_channels=256,
# num_outs=5),
neck=dict(
type='AugGsaFPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='D2DetRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='DeformRoIPoolingPack',
out_size=7,
sample_per_part=2,
out_channels=256,
no_trans=False,
group_size=1,
trans_std=0.1),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
with_reg=False,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# num_classes=80,
num_classes=16,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0)),
reg_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
d2det_head=dict(
type='D2DetHead',
num_convs=1,
in_channels=256,
# num_classes=80,
num_classes=16,
norm_cfg=dict(type='GN', num_groups=36),
MASK_ON=False)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_radius=1,
pos_weight=-1,
max_num_reg=192,
mask_size=28,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.03, nms=dict(type='nms', iou_thr=0.5), max_per_img=125))
# optimizer
# optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1800,
warmup_ratio=1.0 / 80,
# step=[20, 23])
step=[8, 11])
# total_epochs = 24
total_epochs = 12 |
the-stack_106_25695 | #!/usr/bin/env python
# encoding: utf-8
"""
pymongo.py
This is the new mongo adapter for scout that skips mongoengine and uses pymongo,
it is a communicator for quering and updating the mongodatabase.
This is best practice:
uri = "mongodb://%s:%s@%s" % (
quote_plus(user), quote_plus(password), host)
client = MongoClient(uri)
This is to check if server is available:
from pymongo.errors import ConnectionFailure
client = MongoClient()
try:
# The ismaster command is cheap and does not require auth.
client.admin.command('ismaster')
except ConnectionFailure:
print("Server not available")
Created by Måns Magnusson on 2017-02-15.
Copyright (c) 2017 __MoonsoInc__. All rights reserved.
"""
import logging
from datetime import datetime
from .hgnc import GeneHandler
from .case import CaseHandler
from .institute import InstituteHandler
from .event import EventHandler
from .hpo import HpoHandler
from .panel import PanelHandler
from .query import QueryHandler
from .variant import VariantHandler
from .user import UserHandler
from .acmg import ACMGHandler
from .index import IndexHandler
from .clinvar import ClinVarHandler
from .matchmaker import MMEHandler
log = logging.getLogger(__name__)
class MongoAdapter(GeneHandler, CaseHandler, InstituteHandler, EventHandler,
HpoHandler, PanelHandler, QueryHandler, VariantHandler,
UserHandler, ACMGHandler, IndexHandler, ClinVarHandler,
MMEHandler):
"""Adapter for cummunication with a mongo database."""
def __init__(self, database=None):
if database:
self.setup(database)
def init_app(self, app):
"""Setup via Flask."""
host = app.config.get('MONGO_HOST', 'localhost')
port = app.config.get('MONGO_PORT', 27017)
dbname = app.config['MONGO_DBNAME']
log.info("connecting to database: %s:%s/%s", host, port, dbname)
self.setup(app.config['MONGO_DATABASE'])
def setup(self, database):
"""Setup connection to database."""
self.db = database
self.hgnc_collection = database.hgnc_gene
self.user_collection = database.user
self.whitelist_collection = database.whitelist
self.institute_collection = database.institute
self.event_collection = database.event
self.case_collection = database.case
self.panel_collection = database.gene_panel
self.hpo_term_collection = database.hpo_term
self.disease_term_collection = database.disease_term
self.variant_collection = database.variant
self.acmg_collection = database.acmg
self.clinvar_collection = database.clinvar
self.clinvar_submission_collection = database.clinvar_submission
self.exon_collection = database.exon
self.transcript_collection = database.transcript
def collections(self):
"""Return all collection names
Returns:
collection_names(list(str))
"""
return self.db.collection_names(include_system_collections=False)
def __str__(self):
return "MongoAdapter(db={0})".format(self.db)
|
the-stack_106_25698 | # -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETL_450(SpellEntity):
"""
火球术5
造成$12点伤害。0造成$13点伤害。0造成$14点伤害。0造成$15点伤害。0造成$16点伤害。
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 12
self.range = 1
def play(self, game, hero, target):
power = game.get_spell_power(self.spell_school, hero.own)
target.got_damage(game, (self.damage + power) * self.damage_advantage[self.lettuce_role][target.lettuce_role])
|
the-stack_106_25704 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from attacks.abstract_attack import AbstractAttack
from lib.rsalibnum import gcd
from Crypto.Util.number import long_to_bytes, bytes_to_long
import gmpy2
import itertools
# Source: https://crypto.stackexchange.com/a/60404
def bytes_to_integer(data):
output = 0
size = len(data)
for index in range(size):
output |= data[index] << (8 * (size - 1 - index))
return output
def integer_to_bytes(integer, _bytes):
output = bytearray()
for byte in range(_bytes):
output.append((integer >> (8 * (_bytes - 1 - byte))) & 255)
return output
# Source: https://github.com/ashutosh1206/Crypton/blob/master/RSA-encryption/Attack-Common-Modulus/exploit.py
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
# Calculates a^{b} mod n when b is negative
def neg_pow(a, b, n):
assert b < 0
assert gcd(a, n) == 1
res = int(gmpy2.invert(a, n))
res = pow(res, b * (-1), n)
return res
# e1 --> Public Key exponent used to encrypt message m and get ciphertext c1
# e2 --> Public Key exponent used to encrypt message m and get ciphertext c2
# n --> Modulus
# The following attack works only when m^{GCD(e1, e2)} < n
def common_modulus(e1, e2, n, c1, c2):
c1 = bytes_to_long(c1)
c2 = bytes_to_long(c2)
g, a, b = egcd(e1, e2)
if a < 0:
c1 = neg_pow(c1, a, n)
else:
c1 = pow(c1, a, n)
if b < 0:
c2 = neg_pow(c2, b, n)
else:
c2 = pow(c2, b, n)
ct = c1 * c2 % n
m = int(gmpy2.iroot(ct, g)[0])
return m
class Attack(AbstractAttack):
def __init__(self, timeout=60):
super().__init__(timeout)
self.speed = AbstractAttack.speed_enum["fast"]
def common_modulus_attack(self, c1, c2, k1, k2):
if k1.n != k2.n:
return None
if gcd(k1.e, k2.e) != 1:
return None
deciphered_message = common_modulus(k1.e, k2.e, k1.n, c1, c2)
return long_to_bytes(deciphered_message)
def attack(self, publickeys, cipher=[]):
"""Common modulus attack"""
if len(publickeys) < 2:
return (None, None)
if len(cipher) < 2:
return (None, None)
plains = []
for k1, k2 in itertools.combinations(publickeys, 2):
for c1, c2 in itertools.combinations(cipher, 2):
plains.append(self.common_modulus_attack(c1, c2, k1, k2))
if all([_ == None for _ in plains]):
plains = None
return (None, plains)
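# Hypothetical usage sketch (not part of the original tool): recovers a toy
# plaintext from two ciphertexts sharing the same modulus; all numbers below are
# assumptions chosen so that m ** gcd(e1, e2) < n holds.
if __name__ == "__main__":
    n = 3233  # toy modulus, 61 * 53
    e1, e2 = 3, 7  # coprime public exponents sharing n
    m = 42  # toy plaintext
    c1 = long_to_bytes(pow(m, e1, n))
    c2 = long_to_bytes(pow(m, e2, n))
    print(common_modulus(e1, e2, n, c1, c2) == m)  # expected: True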
|
the-stack_106_25706 | #!/usr/bin/env python
""" turtle-example-suite:
tdemo_yinyang.py
Another drawing suitable as a beginner's
programming example.
The small circles are drawn by the circle
command.
"""
from turtle import *
def yin(radius, color1, color2):
width(3)
color("black")
fill(True)
circle(radius/2., 180)
circle(radius, 180)
left(180)
circle(-radius/2., 180)
color(color1)
fill(True)
color(color2)
left(90)
up()
forward(radius*0.375)
right(90)
down()
circle(radius*0.125)
left(90)
fill(False)
up()
backward(radius*0.375)
down()
left(90)
def main():
reset()
yin(200, "white", "black")
yin(200, "black", "white")
ht()
return "Done!"
if __name__ == '__main__':
main()
mainloop()
|
the-stack_106_25707 | #!/usr/bin/env python
from scipy.stats import t, laplace, norm
import numpy as np
import matplotlib.pylab as pl
x = np.linspace(-4, 4, 100)
n = norm.pdf(x, loc=0, scale=1)
l = laplace.pdf(x, loc=0, scale=1 / (2 ** 0.5))
t = t.pdf(x, df=1, loc=0, scale=1)
pl.plot(n, 'k:',
t, 'b--',
l, 'r-')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('studentLaplacePdfPlot_1.png')
pl.figure()
pl.plot(np.log(n), 'k:',
np.log(t), 'b--',
np.log(l), 'r-')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('studentLaplacePdfPlot_2.png')
pl.show()
|
the-stack_106_25708 | """Fully-connected architecture."""
import torch
import torch.nn as nn
__all__ = ['MLP']
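# Hypothetical usage sketch (not part of the original module):
#
#   model = MLP(input_size=8, output_size=3, nhidden=2, dhidden=32)
#   x = torch.randn(16, 8)
#   scores = model(x)               # -> shape (16, 3)
#   c = torch.randint(0, 3, (16,))
#   picked = model(x, c)            # -> shape (16, 1), score at class index c per row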
class MLP(nn.Module):
def __init__(self, input_size, output_size, nhidden=3, dhidden=16, activation=nn.ReLU, bias=True):
super(MLP, self).__init__()
self.nhidden = nhidden
if isinstance(dhidden, int):
dhidden = [dhidden] * (self.nhidden + 1) # one for input layer
input_layer = nn.Linear(input_size, dhidden[0], bias=bias)
hidden_layers = [nn.Linear(dhidden[i], dhidden[i+1], bias=bias) for i in range(nhidden)]
output_layer = nn.Linear(dhidden[nhidden], output_size, bias=bias)
layers = [input_layer] + hidden_layers + [output_layer]
main = []
for l in layers:
main.extend([l, activation()])
main = main[:-1] # no activation after last layer
self.main = nn.Sequential(*main)
def forward(self, x, c=None):
assert x.ndim == 2
out = self.main(x)
if c is not None:
out = out[range(len(c)), c].unsqueeze(1)
return out |
the-stack_106_25712 | import polygon
from polygon import StreamClient, enums
import datetime
from datetime import datetime
import time
import threading
import config
import traceback
import requests
import redis
import json
print("starting stream...")
key = config.polygon_key
def connections():
# redis_pool = redis.ConnectionPool(host=config.redis_host, port=config.redis_port, db=0, password=config.redis_pw)
redis_pool = redis.ConnectionPool(connection_class=redis.UnixDomainSocketConnection, path="/var/run/redis/redis-server.sock",
password=config.redis_pw, db=0)
r = redis.Redis(connection_pool=redis_pool, charset="utf-8", decode_responses=True)
print('redis connected', r, redis_pool)
return r
def redis_message(messages):
for message in messages:
r.rpush('crypto-list', json.dumps(message))
return None
def unix_convert(ts):
ts = int(ts/1000)
tdate = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return tdate
def save_data(message):
z = time.time()
keys = {'pair': 'pair', 'v': 'tick_volume', 'vw': 'tick_vwap', 'z': 'avg_trade_size', 'o': 'tick_open',
'c': 'tick_close', 'h': 'tick_high', 'l': 'tick_low', 's': 'time_beg', 'e': 'time_end'}
# Drop Unknown Keys
key_count = len(message[0].keys())
if key_count > len(keys.keys())+1:
message = [{k: single[k] for k in keys if k in single} for single in message]
print('New fields detected! Check API documentation: https://polygon.io/docs/websockets/')
else:
message = [{k: single[k] for k in keys if k in single} for single in message]
new_message = []
for d in message:
# del d['ev'] # delete status
d = {keys[name]: val for name, val in d.items()}
d['tdate'] = unix_convert(d['time_end'])
d['save_date'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
cols = ['pair', 'tick_volume', 'tick_vwap', 'avg_trade_size', 'tick_open', 'tick_close', 'tick_high',
'tick_low', 'time_beg', 'time_end', 'tdate', 'save_date']
d = {k: d[k] for k in cols}
new_message.append(d)
redis_message(new_message)
# print(message)
print(datetime.utcnow(), 'crypto', time.time()-z)
return None
def my_custom_process_message(ws, msg):
message = json.loads(msg)
if message[0]['ev'] != 'status':
threading.Thread(target=save_data, args=[message]).start()
return None
def my_custom_error_handler(ws, error):
raise ValueError('an error happened:', error)
def my_custom_close_handler(ws, close_code, close_msg):
print("closed connection", close_code, close_msg)
return None
def main():
# my_client = polygon.StreamClient(key, polygon.enums.StreamCluster('stocks'), on_message=my_custom_process_message,
# on_close=my_custom_close_handler, on_error=my_custom_error_handler)
my_client = polygon.StreamClient(key, polygon.enums.StreamCluster('crypto'), on_message=my_custom_process_message,
on_close=my_custom_close_handler, on_error=my_custom_error_handler)
try:
my_client.start_stream_thread()
# my_client.subscribe_stock_second_aggregates()
my_client.subscribe_crypto_minute_aggregates()
# my_client.subscribe_stock_trades()
except Exception:
traceback.print_exc()
# my_client.unsubscribe_stock_second_aggregates()
my_client.unsubscribe_crypto_minute_aggregates()
# my_client.unsubscribe_stock_trades_aggregates()
return None
def internet_check():
url = "https://socket.polygon.io"
timeout = 15
try:
requests.get(url, timeout=timeout)
connected = True
print('Internet connected')
except (requests.ConnectionError, requests.Timeout):
connected = False
print('No Internet')
return connected
if __name__ == "__main__":
connected = internet_check()
if connected:
try:
r = connections()
main()
except Exception:
traceback.print_exc()
time.sleep(1)
connected = internet_check()
while not connected:
connected = internet_check()
time.sleep(5)
while not connected:
connected = internet_check()
time.sleep(5)
continue
|
the-stack_106_25713 | # Copyright (C) 2010, 2011 Sebastian Thiel ([email protected]) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from io import BytesIO
import mmap
import os
import sys
import zlib
from gitdb.fun import (
msb_size,
stream_copy,
apply_delta_data,
connect_deltas,
delta_types
)
from gitdb.util import (
allocate_memory,
LazyMixin,
make_sha,
write,
close,
)
from gitdb.const import NULL_BYTE, BYTE_SPACE
from gitdb.utils.compat import buffer
from gitdb.utils.encoding import force_bytes
has_perf_mod = False
PY26 = sys.version_info[:2] < (2, 7)
try:
from _perf import apply_delta as c_apply_delta
has_perf_mod = True
except ImportError:
pass
__all__ = ('DecompressMemMapReader', 'FDCompressedSha1Writer', 'DeltaApplyReader',
'Sha1Writer', 'FlexibleSha1Writer', 'ZippedStoreShaWriter', 'FDCompressedSha1Writer',
'FDStream', 'NullStream')
#{ RO Streams
class DecompressMemMapReader(LazyMixin):
"""Reads data in chunks from a memory map and decompresses it. The client sees
    only the uncompressed data; the respective file-like read calls handle on-demand
    buffered decompression accordingly.
A constraint on the total size of bytes is activated, simulating
a logical file within a possibly larger physical memory area
To read efficiently, you clearly don't want to read individual bytes, instead,
read a few kilobytes at least.
**Note:** The chunk-size should be carefully selected as it will involve quite a bit
    of string copying due to the way the zlib is implemented. It's very wasteful,
hence we try to find a good tradeoff between allocation time and number of
times we actually allocate. An own zlib implementation would be good here
to better support streamed reading - it would only need to keep the mmap
and decompress it into chunks, that's all ... """
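    # A minimal usage sketch (illustration only, not part of the original docs);
    # the object path below is an assumption:
    #
    #   with open("/path/to/loose/object", "rb") as fp:
    #       m = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
    #       typ, size, stream = DecompressMemMapReader.new(m, close_on_deletion=True)
    #       content = stream.read(size)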
__slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close',
'_cbr', '_phi')
max_read_size = 512 * 1024 # currently unused
def __init__(self, m, close_on_deletion, size=None):
"""Initialize with mmap for stream reading
:param m: must be content data - use new if you have object data and no size"""
self._m = m
self._zip = zlib.decompressobj()
self._buf = None # buffer of decompressed bytes
self._buflen = 0 # length of bytes in buffer
if size is not None:
self._s = size # size of uncompressed data to read in total
self._br = 0 # num uncompressed bytes read
self._cws = 0 # start byte of compression window
self._cwe = 0 # end byte of compression window
self._cbr = 0 # number of compressed bytes read
self._phi = False # is True if we parsed the header info
self._close = close_on_deletion # close the memmap on deletion ?
def _set_cache_(self, attr):
assert attr == '_s'
# only happens for size, which is a marker to indicate we still
# have to parse the header from the stream
self._parse_header_info()
def __del__(self):
self.close()
def _parse_header_info(self):
"""If this stream contains object data, parse the header info and skip the
stream to a point where each read will yield object content
:return: parsed type_string, size"""
# read header
# should really be enough, cgit uses 8192 I believe
# And for good reason !! This needs to be that high for the header to be read correctly in all cases
maxb = 8192
self._s = maxb
hdr = self.read(maxb)
hdrend = hdr.find(NULL_BYTE)
typ, size = hdr[:hdrend].split(BYTE_SPACE)
size = int(size)
self._s = size
# adjust internal state to match actual header length that we ignore
# The buffer will be depleted first on future reads
self._br = 0
hdrend += 1
self._buf = BytesIO(hdr[hdrend:])
self._buflen = len(hdr) - hdrend
self._phi = True
return typ, size
#{ Interface
@classmethod
def new(self, m, close_on_deletion=False):
"""Create a new DecompressMemMapReader instance for acting as a read-only stream
This method parses the object header from m and returns the parsed
type and size, as well as the created stream instance.
:param m: memory map on which to operate. It must be object data ( header + contents )
:param close_on_deletion: if True, the memory map will be closed once we are
being deleted"""
inst = DecompressMemMapReader(m, close_on_deletion, 0)
typ, size = inst._parse_header_info()
return typ, size, inst
def data(self):
""":return: random access compatible data we are working on"""
return self._m
def close(self):
"""Close our underlying stream of compressed bytes if this was allowed during initialization
:return: True if we closed the underlying stream
:note: can be called safely
"""
if self._close:
if hasattr(self._m, 'close'):
self._m.close()
self._close = False
# END handle resource freeing
def compressed_bytes_read(self):
"""
:return: number of compressed bytes read. This includes the bytes it
took to decompress the header ( if there was one )"""
# ABSTRACT: When decompressing a byte stream, it can be that the first
# x bytes which were requested match the first x bytes in the loosely
# compressed datastream. This is the worst-case assumption that the reader
# does, it assumes that it will get at least X bytes from X compressed bytes
        # in all cases.
# The caveat is that the object, according to our known uncompressed size,
# is already complete, but there are still some bytes left in the compressed
# stream that contribute to the amount of compressed bytes.
# How can we know that we are truly done, and have read all bytes we need
# to read ?
# Without help, we cannot know, as we need to obtain the status of the
# decompression. If it is not finished, we need to decompress more data
# until it is finished, to yield the actual number of compressed bytes
# belonging to the decompressed object
# We are using a custom zlib module for this, if its not present,
# we try to put in additional bytes up for decompression if feasible
# and check for the unused_data.
# Only scrub the stream forward if we are officially done with the
# bytes we were to have.
if self._br == self._s and not self._zip.unused_data:
# manipulate the bytes-read to allow our own read method to continue
# but keep the window at its current position
self._br = 0
if hasattr(self._zip, 'status'):
while self._zip.status == zlib.Z_OK:
self.read(mmap.PAGESIZE)
# END scrub-loop custom zlib
else:
# pass in additional pages, until we have unused data
while not self._zip.unused_data and self._cbr != len(self._m):
self.read(mmap.PAGESIZE)
# END scrub-loop default zlib
# END handle stream scrubbing
# reset bytes read, just to be sure
self._br = self._s
# END handle stream scrubbing
# unused data ends up in the unconsumed tail, which was removed
# from the count already
return self._cbr
#} END interface
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self._zip = zlib.decompressobj()
self._br = self._cws = self._cwe = self._cbr = 0
if self._phi:
self._phi = False
del(self._s) # trigger header parsing on first access
# END skip header
def read(self, size=-1):
if size < 1:
size = self._s - self._br
else:
size = min(size, self._s - self._br)
# END clamp size
if size == 0:
return bytes()
# END handle depletion
# deplete the buffer, then just continue using the decompress object
# which has an own buffer. We just need this to transparently parse the
# header from the zlib stream
dat = bytes()
if self._buf:
if self._buflen >= size:
# have enough data
dat = self._buf.read(size)
self._buflen -= size
self._br += size
return dat
else:
dat = self._buf.read() # ouch, duplicates data
size -= self._buflen
self._br += self._buflen
self._buflen = 0
self._buf = None
# END handle buffer len
# END handle buffer
# decompress some data
# Abstract: zlib needs to operate on chunks of our memory map ( which may
# be large ), as it will otherwise and always fill in the 'unconsumed_tail'
# attribute which possible reads our whole map to the end, forcing
# everything to be read from disk even though just a portion was requested.
# As this would be a nogo, we workaround it by passing only chunks of data,
# moving the window into the memory map along as we decompress, which keeps
# the tail smaller than our chunk-size. This causes 'only' the chunk to be
# copied once, and another copy of a part of it when it creates the unconsumed
# tail. We have to use it to hand in the appropriate amount of bytes during
# the next read.
tail = self._zip.unconsumed_tail
if tail:
# move the window, make it as large as size demands. For code-clarity,
# we just take the chunk from our map again instead of reusing the unconsumed
# tail. The latter one would safe some memory copying, but we could end up
# with not getting enough data uncompressed, so we had to sort that out as well.
# Now we just assume the worst case, hence the data is uncompressed and the window
# needs to be as large as the uncompressed bytes we want to read.
self._cws = self._cwe - len(tail)
self._cwe = self._cws + size
else:
cws = self._cws
self._cws = self._cwe
self._cwe = cws + size
# END handle tail
# if window is too small, make it larger so zip can decompress something
if self._cwe - self._cws < 8:
self._cwe = self._cws + 8
# END adjust winsize
# takes a slice, but doesn't copy the data, it says ...
indata = buffer(self._m, self._cws, self._cwe - self._cws)
# get the actual window end to be sure we don't use it for computations
self._cwe = self._cws + len(indata)
dcompdat = self._zip.decompress(indata, size)
# update the amount of compressed bytes read
# We feed possibly overlapping chunks, which is why the unconsumed tail
# has to be taken into consideration, as well as the unused data
# if we hit the end of the stream
# NOTE: Behavior changed in PY2.7 onward, which requires special handling to make the tests work properly.
# They are thorough, and I assume it is truly working.
# Why is this logic as convoluted as it is ? Please look at the table in
# https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results.
        # Basically, on py2.6, you want to use branch 1, whereas on all other Python versions, the second branch
# will be the one that works.
# However, the zlib VERSIONs as well as the platform check is used to further match the entries in the
# table in the github issue. This is it ... it was the only way I could make this work everywhere.
# IT's CERTAINLY GOING TO BITE US IN THE FUTURE ... .
if PY26 or ((zlib.ZLIB_VERSION == '1.2.7' or zlib.ZLIB_VERSION == '1.2.5') and not sys.platform == 'darwin'):
unused_datalen = len(self._zip.unconsumed_tail)
else:
unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
# # end handle very special case ...
self._cbr += len(indata) - unused_datalen
self._br += len(dcompdat)
if dat:
dcompdat = dat + dcompdat
# END prepend our cached data
# it can happen, depending on the compression, that we get less bytes
# than ordered as it needs the final portion of the data as well.
# Recursively resolve that.
# Note: dcompdat can be empty even though we still appear to have bytes
# to read, if we are called by compressed_bytes_read - it manipulates
# us to empty the stream
if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s:
dcompdat += self.read(size - len(dcompdat))
# END handle special case
return dcompdat
class DeltaApplyReader(LazyMixin):
"""A reader which dynamically applies pack deltas to a base object, keeping the
memory demands to a minimum.
The size of the final object is only obtainable once all deltas have been
applied, unless it is retrieved from a pack index.
The uncompressed Delta has the following layout (MSB being a most significant
bit encoded dynamic size):
* MSB Source Size - the size of the base against which the delta was created
* MSB Target Size - the size of the resulting data after the delta was applied
* A list of one byte commands (cmd) which are followed by a specific protocol:
* cmd & 0x80 - copy delta_data[offset:offset+size]
* Followed by an encoded offset into the delta data
* Followed by an encoded size of the chunk to copy
* cmd & 0x7f - insert
* insert cmd bytes from the delta buffer into the output stream
* cmd == 0 - invalid operation ( or error in delta stream )
"""
__slots__ = (
"_bstream", # base stream to which to apply the deltas
"_dstreams", # tuple of delta stream readers
"_mm_target", # memory map of the delta-applied data
"_size", # actual number of bytes in _mm_target
"_br" # number of bytes read
)
#{ Configuration
k_max_memory_move = 250 * 1000 * 1000
#} END configuration
def __init__(self, stream_list):
"""Initialize this instance with a list of streams, the first stream being
the delta to apply on top of all following deltas, the last stream being the
base object onto which to apply the deltas"""
assert len(stream_list) > 1, "Need at least one delta and one base stream"
self._bstream = stream_list[-1]
self._dstreams = tuple(stream_list[:-1])
self._br = 0
def _set_cache_too_slow_without_c(self, attr):
# the direct algorithm is fastest and most direct if there is only one
# delta. Also, the extra overhead might not be worth it for items smaller
# than X - definitely the case in python, every function call costs
# huge amounts of time
# if len(self._dstreams) * self._bstream.size < self.k_max_memory_move:
if len(self._dstreams) == 1:
return self._set_cache_brute_(attr)
# Aggregate all deltas into one delta in reverse order. Hence we take
# the last delta, and reverse-merge its ancestor delta, until we receive
# the final delta data stream.
dcl = connect_deltas(self._dstreams)
# call len directly, as the (optional) c version doesn't implement the sequence
# protocol
if dcl.rbound() == 0:
self._size = 0
self._mm_target = allocate_memory(0)
return
# END handle empty list
self._size = dcl.rbound()
self._mm_target = allocate_memory(self._size)
bbuf = allocate_memory(self._bstream.size)
stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE)
# APPLY CHUNKS
write = self._mm_target.write
dcl.apply(bbuf, write)
self._mm_target.seek(0)
def _set_cache_brute_(self, attr):
"""If we are here, we apply the actual deltas"""
# TODO: There should be a special case if there is only one stream
# Then the default-git algorithm should perform a tad faster, as the
# delta is not peaked into, causing less overhead.
buffer_info_list = list()
max_target_size = 0
for dstream in self._dstreams:
buf = dstream.read(512) # read the header information + X
offset, src_size = msb_size(buf)
offset, target_size = msb_size(buf, offset)
buffer_info_list.append((buffer(buf, offset), offset, src_size, target_size))
max_target_size = max(max_target_size, target_size)
# END for each delta stream
# sanity check - the first delta to apply should have the same source
# size as our actual base stream
base_size = self._bstream.size
target_size = max_target_size
# if we have more than 1 delta to apply, we will swap buffers, hence we must
# assure that all buffers we use are large enough to hold all the results
if len(self._dstreams) > 1:
base_size = target_size = max(base_size, max_target_size)
# END adjust buffer sizes
# Allocate private memory map big enough to hold the first base buffer
# We need random access to it
bbuf = allocate_memory(base_size)
stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE)
# allocate memory map large enough for the largest (intermediate) target
# We will use it as scratch space for all delta ops. If the final
# target buffer is smaller than our allocated space, we just use parts
# of it upon return.
tbuf = allocate_memory(target_size)
# for each delta to apply, memory map the decompressed delta and
# work on the op-codes to reconstruct everything.
# For the actual copying, we use a seek and write pattern of buffer
# slices.
final_target_size = None
for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)):
# allocate a buffer to hold all delta data - fill in the data for
# fast access. We do this as we know that reading individual bytes
# from our stream would be slower than necessary ( although possible )
# The dbuf buffer contains commands after the first two MSB sizes, the
# offset specifies the amount of bytes read to get the sizes.
ddata = allocate_memory(dstream.size - offset)
ddata.write(dbuf)
# read the rest from the stream. The size we give is larger than necessary
stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE)
#######################################################################
if 'c_apply_delta' in globals():
c_apply_delta(bbuf, ddata, tbuf)
else:
apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write)
#######################################################################
# finally, swap out source and target buffers. The target is now the
# base for the next delta to apply
bbuf, tbuf = tbuf, bbuf
bbuf.seek(0)
tbuf.seek(0)
final_target_size = target_size
# END for each delta to apply
# its already seeked to 0, constrain it to the actual size
# NOTE: in the end of the loop, it swaps buffers, hence our target buffer
# is not tbuf, but bbuf !
self._mm_target = bbuf
self._size = final_target_size
#{ Configuration
if not has_perf_mod:
_set_cache_ = _set_cache_brute_
else:
_set_cache_ = _set_cache_too_slow_without_c
#} END configuration
def read(self, count=0):
bl = self._size - self._br # bytes left
if count < 1 or count > bl:
count = bl
# NOTE: we could check for certain size limits, and possibly
# return buffers instead of strings to prevent byte copying
data = self._mm_target.read(count)
self._br += len(data)
return data
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Allows to reset the stream to restart reading
:raise ValueError: If offset and whence are not 0"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self._br = 0
self._mm_target.seek(0)
#{ Interface
@classmethod
def new(cls, stream_list):
"""
Convert the given list of streams into a stream which resolves deltas
when reading from it.
:param stream_list: two or more stream objects, first stream is a Delta
to the object that you want to resolve, followed by N additional delta
streams. The list's last stream must be a non-delta stream.
:return: Non-Delta OPackStream object whose stream can be used to obtain
the decompressed resolved data
:raise ValueError: if the stream list cannot be handled"""
if len(stream_list) < 2:
raise ValueError("Need at least two streams")
# END single object special handling
if stream_list[-1].type_id in delta_types:
raise ValueError(
"Cannot resolve deltas if there is no base object stream, last one was type: %s" % stream_list[-1].type)
# END check stream
return cls(stream_list)
#} END interface
#{ OInfo like Interface
@property
def type(self):
return self._bstream.type
@property
def type_id(self):
return self._bstream.type_id
@property
def size(self):
""":return: number of uncompressed bytes in the stream"""
return self._size
#} END oinfo like interface
#} END RO streams
#{ W Streams
class Sha1Writer(object):
"""Simple stream writer which produces a sha whenever you like as it degests
everything it is supposed to write"""
__slots__ = "sha1"
def __init__(self):
self.sha1 = make_sha()
#{ Stream Interface
def write(self, data):
""":raise IOError: If not all bytes could be written
:param data: byte object
:return: length of incoming data"""
self.sha1.update(data)
return len(data)
# END stream interface
#{ Interface
def sha(self, as_hex=False):
""":return: sha so far
:param as_hex: if True, sha will be hex-encoded, binary otherwise"""
if as_hex:
return self.sha1.hexdigest()
return self.sha1.digest()
#} END interface
class FlexibleSha1Writer(Sha1Writer):
"""Writer producing a sha1 while passing on the written bytes to the given
write function"""
__slots__ = 'writer'
def __init__(self, writer):
Sha1Writer.__init__(self)
self.writer = writer
def write(self, data):
Sha1Writer.write(self, data)
self.writer(data)
class ZippedStoreShaWriter(Sha1Writer):
"""Remembers everything someone writes to it and generates a sha"""
__slots__ = ('buf', 'zip')
def __init__(self):
Sha1Writer.__init__(self)
self.buf = BytesIO()
self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)
def __getattr__(self, attr):
return getattr(self.buf, attr)
def write(self, data):
alen = Sha1Writer.write(self, data)
self.buf.write(self.zip.compress(data))
return alen
def close(self):
self.buf.write(self.zip.flush())
def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
"""Seeking currently only supports to rewind written data
Multiple writes are not supported"""
if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
raise ValueError("Can only seek to position 0")
# END handle offset
self.buf.seek(0)
def getvalue(self):
""":return: string value from the current stream position to the end"""
return self.buf.getvalue()
class FDCompressedSha1Writer(Sha1Writer):
"""Digests data written to it, making the sha available, then compress the
data and write it to the file descriptor
**Note:** operates on raw file descriptors
**Note:** for this to work, you have to use the close-method of this instance"""
__slots__ = ("fd", "sha1", "zip")
# default exception
exc = IOError("Failed to write all bytes to filedescriptor")
def __init__(self, fd):
super(FDCompressedSha1Writer, self).__init__()
self.fd = fd
self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)
#{ Stream Interface
def write(self, data):
""":raise IOError: If not all bytes could be written
        :return: length of incoming data"""
self.sha1.update(data)
cdata = self.zip.compress(data)
bytes_written = write(self.fd, cdata)
if bytes_written != len(cdata):
raise self.exc
return len(data)
def close(self):
remainder = self.zip.flush()
if write(self.fd, remainder) != len(remainder):
raise self.exc
return close(self.fd)
#} END stream interface
class FDStream(object):
"""A simple wrapper providing the most basic functions on a file descriptor
with the fileobject interface. Cannot use os.fdopen as the resulting stream
takes ownership"""
__slots__ = ("_fd", '_pos')
def __init__(self, fd):
self._fd = fd
self._pos = 0
def write(self, data):
self._pos += len(data)
os.write(self._fd, data)
def read(self, count=0):
if count == 0:
            count = os.fstat(self._fd).st_size  # this wrapper stores no file path; stat the descriptor instead
# END handle read everything
bytes = os.read(self._fd, count)
self._pos += len(bytes)
return bytes
def fileno(self):
return self._fd
def tell(self):
return self._pos
def close(self):
close(self._fd)
class NullStream(object):
"""A stream that does nothing but providing a stream interface.
Use it like /dev/null"""
__slots__ = tuple()
def read(self, size=0):
return ''
def close(self):
pass
def write(self, data):
return len(data)
#} END W streams
|
the-stack_106_25714 | """A main program for Byterun."""
import argparse
import logging
from . import execfile
parser = argparse.ArgumentParser(
prog="byterun",
description="Run Python programs with a Python bytecode interpreter.",
)
parser.add_argument(
'-m', dest='module', action='store_true',
help="prog is a module name, not a file name.",
)
parser.add_argument(
    '-v', '--verbose', dest='verbose', action='store_true',
help="trace the execution of the bytecode.",
)
parser.add_argument(
'prog',
help="The program to run.",
)
parser.add_argument(
'args', nargs=argparse.REMAINDER,
help="Arguments to pass to the program.",
)
args = parser.parse_args()
if args.module:
run_fn = execfile.run_python_module
else:
run_fn = execfile.run_python_file
level = logging.DEBUG if args.verbose else logging.WARNING
logging.basicConfig(level=level)
argv = [args.prog] + args.args
run_fn(args.prog, argv)
|
the-stack_106_25715 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Program for filtering variants from a VCF file to *de novo* variants.
This program implements the filters and heuristics similar to the one by Wong et al. and
Besenbacher et al.
"""
import argparse
import collections
import datetime
import itertools
import logging
import sys
import pysam
import vcfpy
from vcfpy.exceptions import InvalidRecordException
from snappy_wrappers import genome_regions, sweep
# PED Parsing ======================================================================================
Individual = collections.namedtuple(
"Individual", ["family", "name", "father", "mother", "gender", "affected"]
)
# gender/sex
Individual.UNKNOWN = "0"
Individual.MALE = "1"
Individual.FEMALE = "2"
# affection state
Individual.UNAFFECTED = "1"
Individual.AFFECTED = "2"
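# Illustrative PED input (hypothetical sample names) as consumed by ``Pedigree.load``
# below; columns are tab-separated: family, name, father, mother, sex (1=male,
# 2=female), affection (1=unaffected, 2=affected):
#
#   FAM1    child1   father1  mother1  1  2
#   FAM1    father1  0        0        1  1
#   FAM1    mother1  0        0        2  1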
class Pedigree:
"""Representation of a pedigree."""
@classmethod
def load(klass, file):
entries = []
for line in file:
entries.append(Individual(*line.strip().split("\t")[:6]))
return Pedigree(entries)
def __init__(self, entries):
"""Initialize with an iterable of entries."""
self.entries = list(entries)
self._father_of = {e.name: e.father for e in self.entries}
self._mother_of = {e.name: e.mother for e in self.entries}
self._by_name = {e.name: e for e in self.entries}
self.by_family = {}
for entry in self.entries:
self.by_family.setdefault(entry.family, []).append(entry)
def get_individual(self, name):
"""Return ``Individual`` object with the given name or ``None``."""
return self._by_name.get(name)
def get_father(self, name):
"""Return id of father, if any, otherwise ``None``."""
result = self._father_of.get(name)
if result == "0":
return None
return result
def get_mother(self, name):
"""Return id of mother, if any, otherwise ``None``."""
result = self._mother_of.get(name)
if result == "0":
return None
return result
def print_ped(self, file):
for e in self.entries:
print("\t".join(map(str, e)), file=file)
def to_string(self):
return "\n".join("\t".join(map(str, e)) for e in self.entries)
# Variant Annotation Related =======================================================================
class ClippedRegion(genome_regions.GenomeRegion):
NONE = 0
LEFT = 1
RIGHT = 2
BOTH = 3
def __init__(self, chrom, begin, end, side):
super().__init__(chrom, begin, end)
self.side = side
def is_left(self):
return bool(self.side & 1)
def is_right(self):
return bool(self.side & 2)
def is_both(self):
return bool(self.side == 3)
class VcfProcessor:
"""Base class for VCF processors that augment VCF headers and modify records."""
def __init__(self, file_in, file_out, args=None):
#: file-like object for reading VCF file from
self.file_in = file_in
#: file-like object for writing VCF file to
self.file_out = file_out
#: ``vcfpy.Reader`` for reading VCF records
self.reader = None
#: ``vcfpy.Writer`` for writing VCF records
self.writer = None
#: Configuration
self.args = args
#: Number of written records
self.count_written = 0
#: Time of last block completion
self.time_prev = None
#: Position of last block completion
self.pos_prev = (None, 0)
def process_record(self, record):
"""Process record and write out record using ``write_record``.
        Records have to be written explicitly so it is possible to hold back records, e.g., in the
case of BND records.
"""
self.write_record(record)
def done_processing(self):
"""Called after processing each record.
This can be used for flushing functionality in case records have been held back.
"""
def augment_header(self, header):
"""Augment header
        The default implementation returns the header unchanged; subclasses can add
        their own header lines here.
"""
return header
def write_record(self, record):
"""Write out processed record"""
if self.count_written == 0:
self.time_prev = datetime.datetime.now()
self.pos_prev = (record.CHROM, record.POS)
self.count_written += 1
if self.count_written % 100000 == 0:
this_time = datetime.datetime.now()
if record.CHROM != self.pos_prev[0]:
mbp = "?"
else:
mbp = (record.POS - self.pos_prev[1]) / 1000 / 1000
mbp = "%.1f" % mbp
spent = (this_time - self.time_prev).total_seconds()
logging.info(
"written %s records / %s Mbp / %s:%s in %.1fs",
"{:,}".format(self.count_written),
mbp,
record.CHROM,
"{:,}".format(record.POS),
spent,
)
self.time_prev = this_time
self.pos_prev = (record.CHROM, record.POS)
self.writer.write_record(record)
def run(self):
"""Perform the processing
After processing, the written VCF file will have an augmented header and the records were
processed with ``process_record``.
"""
logging.info("Opening input VCF file...")
self.reader = vcfpy.Reader.from_path(self.file_in)
self.on_after_vcf_reader_creation()
logging.info("Augmenting VCF header...")
header = self.augment_header(self.reader.header)
logging.info("Opening output VCF file...")
self.writer = vcfpy.Writer.from_path(self.file_out, header)
self.on_after_vcf_writer_creation()
logging.info("Processing records...")
self._process_records()
self.done_processing()
def _process_records(self):
"""Process records, either all or only from regions"""
no = 0
if not self.args or not self.args.regions:
logging.info("Process whole file/genome")
for no, record in enumerate(self._yield_skip_invalid(iter(self.reader))):
self.process_record(record)
logging.info("Processed %d records", no)
else:
for region in self.args.regions:
logging.info("Processing region %s", region)
region = "".join([x for x in region if x != ","])
chrom, begin_end = region.split(":", 1)
begin, end = begin_end.split("-", 1)
begin, end = int(begin) - 1, int(end)
try:
it = iter(self.reader.fetch(chrom, begin, end))
no += 1
except ValueError as e:
it = []
logging.warning("WARNING: caught exception %s", e)
for record in self._yield_skip_invalid(it):
if (record.POS - 1 >= end) or (
record.INFO.get("END", record.affected_end) < begin
):
continue
self.process_record(record)
logging.info("Processed %d records", no)
def _yield_skip_invalid(self, it):
"""Wrapper that yields from iterator ``it``, errors ignored when configured so.
We need to skip invalid records as for some reason HTSJDK sometimes writes
out empty records. :(
"""
        if self.args and self.args.skip_invalid:
            # Skip records that vcfpy cannot parse (e.g., empty records written by HTSJDK).
            while True:
                try:
                    yield next(it)
                except InvalidRecordException as e:  # bad but not fatal
                    logging.exception("Found invalid record, skipping. Exception was: %s", e)
                except StopIteration:
                    break  # stop iterating
        else:
            yield from it
def on_after_vcf_reader_creation(self):
"""Hook called after VCF reader has been set"""
def on_after_vcf_writer_creation(self):
"""Hook called after VCF writer has been set"""
class IsMnvAnnotationStep:
"""Flags MNV variants."""
def __init__(self, processor):
self.processor = processor
self.args = self.processor.args
self.pedigree = self.processor.pedigree
def augment_header(self, header):
"""Add INFO line to ``header``."""
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "IsMNV"),
("Type", "Flag"),
("Number", "0"),
("Description", "Valid MNV by itself"),
]
)
)
return header
def trim_common(self, a, b):
while a and b and a[0] == b[0]:
a = a[1:]
b = b[1:]
while a and b and a[-1] == b[-1]:
a = a[:-1]
b = b[:-1]
return a, b
def annotate(self, record):
"""Apply de novo filter for variant."""
if len(record.ALT) != 1:
return # We currently ignore non-biallelic sites
if len(record.REF) == 1 and len(record.ALT[0].value) == 1:
return # Nothing to do
ref, alt = self.trim_common(record.REF, record.ALT[0].value)
if (len(ref) >= 1 and len(alt) >= 2) or (len(ref) >= 2 and len(alt) >= 1):
record.INFO["IsMNV"] = True
class DeNovoAnnotationStep:
"""Apply de novo filtering.
A variant is marked as "de novo" for an individual if both parents are present and it is "0/0"
in both parents.
"""
def __init__(self, processor):
self.processor = processor
self.args = self.processor.args
self.pedigree = self.processor.pedigree
def augment_header(self, header):
"""Augment ``header`` with ``INFO`` lines."""
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "DeNovo"),
("Type", "String"),
("Number", "."),
("Description", "Samples for which the variant is de novo"),
]
)
)
return header
def annotate(self, record):
"""Apply denovo filter for variant"""
if len(record.ALT) != 1:
return # We currently ignore non-biallelic sites
denovo_samples = []
for sample in record.call_for_sample.keys():
father = self.pedigree.get_father(sample)
mother = self.pedigree.get_mother(sample)
if not father or not mother:
continue # skip
gt_sample = record.call_for_sample[sample]
gt_father = record.call_for_sample[father]
gt_mother = record.call_for_sample[mother]
# Perform filtration based on genotypes alone
if gt_sample.data["GT"] not in ("0/1", "0|1", "1/0", "1|0"):
continue # skip, is not HET
if gt_father.data["GT"] not in ("0/0", "0|0"):
continue # skip, is not wild type
if gt_mother.data["GT"] not in ("0/0", "0|0"):
continue # skip, is not wild type
# If we reach here then the call is de novo for the child
denovo_samples.append(sample)
# Annotate de novo samples:
if denovo_samples:
record.INFO["DeNovo"] = list(sorted(denovo_samples))
logging.info(
"Variant at %s:%d is de novo for %s",
record.CHROM,
record.POS,
record.INFO["DeNovo"],
)
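# Example of the genotype rule above (illustration only): for a trio where the
# child's GT is "0/1" and both parents are "0/0", the child is collected into
# INFO/DeNovo. If either parent shows the alternate allele (e.g. "0/1") or has a
# missing/other genotype, the child is skipped.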
class HaplotypeProcessor:
"""Haplotype and phasing based processing
Can be disabled with self.args.use_phase_info == False.
"""
def __init__(self, soft_filter_proc, args):
self.soft_filter_proc = soft_filter_proc
self.args = args
self.pedigree = self.soft_filter_proc.pedigree
self.sweeper = sweep.OverlappingWindowSweeper(
4 * self.soft_filter_proc.args.haplotype_window,
self.soft_filter_proc.args.haplotype_window,
self.process_record,
vcf_record_to_genome_position,
)
self.father, self.mother = self._get_parents()
def augment_header(self, header):
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "DeNovoOrigin"),
("Type", "String"),
("Number", "."),
("Description", "Marker for de novo haplotype origin"),
]
)
)
return header
def _get_parents(self):
"""Return name of parents"""
father, mother = None, None
for entry in self.pedigree.entries:
if entry.father != "0":
if father is None:
father = entry.father
else:
if father != entry.father:
raise Exception("Inconsistent pedigree, seen different fathers!")
if entry.mother != "0":
if mother is None:
mother = entry.mother
else:
if mother != entry.mother:
raise Exception("Inconsistent pedigree, seen different mothers!")
if father is None or mother is None:
raise Exception("Missing father or mother in pedigree!")
return father, mother
def process_record(self, record, left, right):
"""Process record with neighborhood"""
# TODO: much better would be to process all inner of a window at once
# Check whether record is de novo at all
if "DeNovo" not in record.INFO:
return # skip
logging.info("Trying to phase de novo record at %s:%d", record.CHROM, record.POS)
# Get index haplotype id
index_hps = {}
for name, sample in record.call_for_sample.items():
            if name == self.father or name == self.mother:
continue # only indices
if not sample.data.get("HP"):
continue # no haplotype information
# Store name of ALT haplotype here without "/" or "|" separator;
# will be one of "00", "01", "10" or "11".
sample_gt = "".join(x for x in sample.data.get("GT", "0/0") if x in "01")[:2]
if "1" in sample_gt:
index_hps[name] = sample.data["HP"][sample_gt.index("1")]
if not index_hps:
logging.info("=> halotype information is not available")
return # skip, no haplotype information
# Look whether we can find the haplotype for the index in a phased
# variant that is phase informative. If this is the case then we can
# use this to trace the haplotype to its origin. We only look into
# the haplotypes in the indices.
origin = {} # for each sample
for rec in itertools.chain([record], left, right):
logging.info("Considering %s:%s", rec.CHROM, rec.POS)
for name, sample in rec.call_for_sample.items():
if name == self.father or name == self.mother:
continue # only indices here
if not sample.is_phased:
logging.info("not phased on %s:%s", rec.CHROM, rec.POS)
continue # no phasing for sample
if not sample.data.get("HP", None):
logging.info("no haplotype in %s:%s", rec.CHROM, rec.POS)
continue # no haplotype information for sample
if name not in index_hps:
logging.info("%s not in index_hps", name)
continue # no de novo HP for this index
logging.info("-> record is phased and has haplotype")
# If we reach here, we have a phased sample with a haplotype
# in the current record.
index_gt = sample.data.get("GT")
if not index_gt:
continue # no genotype, skip
assert "/" not in index_gt, "Must not be phased"
if index_hps[name] not in sample.data["HP"]:
continue # haplotype info does not match
# Try to infer paternal/maternal information from phasing
try:
idx_paternal = 0 if self.args.phase_paternal_first else 1
if sample.data["HP"].index(index_hps[name]) == idx_paternal:
evidence = "paternal"
else:
evidence = "maternal"
except ValueError:
logging.info("-> record is not in HP")
continue # skip, not in HP
logging.info("-> found evidence for %s origin", evidence)
# Convert flags to annotation in file
if name not in origin:
origin[name] = evidence
elif origin[name] != evidence:
origin[name] = "inconsistent"
logging.info(
"%s denovo=%s:%s-> %s",
name,
record.CHROM,
"{:,}".format(record.POS),
origin[name],
)
# Check whether index hps of record consistent with child
info_value = []
for index, rating in sorted(origin.items()):
other = "."
if rating == "maternal":
other = self.mother
elif rating == "paternal":
other = self.father
info_value.append("{}|{}|{}".format(index, rating, other))
# print('info', info_value, file=sys.stderr)
if info_value:
record.INFO["DeNovoOrigin"] = info_value
def push(self, record):
if not self.args.use_phase_info:
# Handle case of not using phase information and short-circuit
self.soft_filter_proc.write(record)
return
for done_record in self.sweeper.push(record):
self.soft_filter_proc.write(done_record)
def finish(self):
if not self.args.use_phase_info:
return # short-circuit, nothing to do
# print('Finishing haplo...', file=sys.stderr)
for done_record in self.sweeper.finish():
self.soft_filter_proc.write(done_record)
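# Sketch of the resulting annotation (illustration only): if the de novo allele of
# index sample "child1" lies on a haplotype that phased neighboring variants place on
# the paternal side, the record ends up with something like
# INFO/DeNovoOrigin=["child1|paternal|<father-id>"]; conflicting evidence within the
# window yields the rating "inconsistent" (with "." as the other sample).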
def vcf_record_to_genome_position(record):
return sweep.GenomePosition(record.CHROM, record.POS - 1)
class NeighborhoodProcessor:
def __init__(self, soft_filter_proc, haplo_proc):
self.soft_filter_proc = soft_filter_proc
self.args = self.soft_filter_proc.args
self.haplo_proc = haplo_proc
self.sweeper = sweep.OverlappingWindowSweeper(
3 * self.soft_filter_proc.args.exclusive_neighborhood,
self.soft_filter_proc.args.exclusive_neighborhood,
self.process_record,
vcf_record_to_genome_position,
)
def augment_header(self, header):
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "Neighbor"),
("Type", "String"),
("Number", "."),
("Description", "Another de novo variant in neighborhood"),
]
)
)
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "NeighborMNV"),
("Type", "String"),
("Number", "."),
("Description", "Another de novo variant in MNV neighborhood"),
]
)
)
return header
def process_record(self, record, left, right):
"""Process record with tentative neighbors"""
# print('nbr processing', file=sys.stderr)
for other in itertools.chain(left, right):
if self._is_neighbor(record, other, self.args.exclusive_neighborhood):
self._flag(record, other, "Neighbor")
if self._is_neighbor(record, other, self.args.mnv_neighborhood):
self._flag(record, other, "NeighborMNV")
def _is_neighbor(self, lhs, rhs, dist):
if lhs.CHROM != rhs.CHROM:
return False
return abs(lhs.POS - rhs.POS) < dist
def _flag(self, record, other, flag):
if "DeNovo" in record.INFO and "DeNovo" in other.INFO:
in_record = set(record.INFO["DeNovo"])
in_other = set(other.INFO["DeNovo"])
in_both = in_record & in_other
if not in_both:
return # skip, not de novo in both
record_n = set(record.INFO.get(flag, []))
other_n = set(other.INFO.get(flag, []))
record.INFO[flag] = list(sorted(record_n | in_both))
other.INFO[flag] = list(sorted(other_n | in_both))
def finish(self):
logging.info("Finishing nbr...")
for done_record in self.sweeper.finish():
self.haplo_proc.push(done_record)
self.haplo_proc.finish()
def push(self, record):
for done_record in self.sweeper.push(record):
self.haplo_proc.push(done_record)
class BesenbacherFilterAnnotationStep:
"""Apply "Besenbacher" filter to de novo calls
Must be after the DeNovoAnnotationStep
"""
def __init__(self, processor):
self.processor = processor
self.args = self.processor.args
self.pedigree = self.processor.pedigree
self.warned = False
def annotate(self, record):
if not record.INFO.get("DeNovo"):
return # skip, no sample marked as de novo
for child in record.INFO.get("DeNovo"):
# shortcuts to names of mother and father
father = self.pedigree.get_father(child)
mother = self.pedigree.get_mother(child)
# shortcuts to genotype calls
gt_child = record.call_for_sample[child]
gt_father = record.call_for_sample[father]
gt_mother = record.call_for_sample[mother]
if "AD" in gt_child.data: # looks like GATK
self._fix_gatk_record(record)
if not self.annotate_gatk(record, gt_child, gt_father, gt_mother):
break # added Besenbacher filter
elif "NV" not in gt_child.data: # looks like Platypus
if not self.annotate_platypus(record, gt_child, gt_father, gt_mother):
break # added Besenbacher filter
else:
if not self.warned:
self.warned = True
logging.warning(
"WARNING: missing AD or NV FORMAT, neither Platypus nor GATK; "
"warning only shown once"
)
logging.warning("%s", gt_child.data)
def _fix_gatk_record(self, record):
"""Fix GATK record in case of ``.`` occurenced, entries AD and DP."""
for call in record.calls:
if "AD" in call.data and call.data.get("AD") is None:
call.data["AD"] = [0 for i in range(len(call.ALT or [])) + 1]
if "DP" in call.data and call.data.get("DP") is None:
call.data["DP"] = 0
def augment_header(self, header):
"""Augment and return augmented ``header``."""
header.add_filter_line(
vcfpy.OrderedDict(
[
("ID", "Besenbacher"),
(
"Description",
"Does not satisfy Besenbacher filter (only considered for tentative de novo calls",
),
]
)
)
header.add_filter_line(
vcfpy.OrderedDict(
[("ID", "InParent"), ("Description", "Variant also seen in a parent")]
)
)
header.add_filter_line(
vcfpy.OrderedDict(
[("ID", "InFamily"), ("Description", "Variant also seen in another family member")]
)
)
return header
def annotate_platypus(self, record, gt_child, gt_father, gt_mother):
besenbacher = False
# perform Besenbacher filtering
if not gt_child.data["NR"] or gt_child.data["GQ"] is None:
return False
if (
gt_child.data["NR"] < self.args.min_dp
or gt_child.data["NR"] > self.args.max_dp
or gt_child.data["GQ"] < self.args.min_gq
):
record.add_filter("Besenbacher")
besenbacher = True
assert gt_child.data["NR"] > 0 # the following might otherwise fail
ab_child = gt_child.data["NV"] / gt_child.data["NR"]
if ab_child < self.args.min_ab or ab_child > self.args.max_ab:
if not besenbacher:
record.add_filter("Besenbacher")
besenbacher = True
for gt in record.calls:
if gt.sample != gt_child.sample:
if gt.data["NV"] and gt.data["NV"] > 0:
if record.FILTER and "InFamily" not in record.FILTER:
record.add_filter("InFamily")
break
for gt in [gt_father, gt_mother]:
if gt.data["NV"] and gt.data["NV"] > 0:
if record.FILTER and "InParent" not in record.FILTER:
record.add_filter("InParent")
if any(gt.data[key] is None for key in ("NR", "NV", "GQ")):
if not besenbacher:
record.add_filter("Besenbacher")
return False
if (
gt.data["NR"] < self.args.min_dp
or gt.data["NR"] > self.args.max_dp
or gt.data["GQ"] < self.args.min_gq
or gt.data["NV"] > self.args.max_ad2
):
if not besenbacher:
record.add_filter("Besenbacher")
besenbacher = True
return not besenbacher
def annotate_gatk(self, record, gt_child, gt_father, gt_mother):
besenbacher = False
# perform Besenbacher filtering
if not gt_child.data["DP"] or gt_child.data["GQ"] is None:
return False
if (
gt_child.data["DP"] < self.args.min_dp
or gt_child.data["DP"] > self.args.max_dp
or gt_child.data["GQ"] < self.args.min_gq
):
record.add_filter("Besenbacher")
besenbacher = True
assert gt_child.data["DP"] > 0 # the following might otherwise fail
ab_child = gt_child.data["AD"][1] / sum(gt_child.data["AD"])
if ab_child < self.args.min_ab or ab_child > self.args.max_ab:
if not besenbacher:
record.add_filter("Besenbacher")
besenbacher = True
for gt in record.calls:
if gt.sample != gt_child.sample:
if gt.data["AD"] and gt.data["AD"][1] > 0:
if record.FILTER and "InFamily" not in record.FILTER:
record.add_filter("InFamily")
break
for gt in [gt_father, gt_mother]:
if gt.data["AD"] and gt.data["AD"][1] > 0:
if record.FILTER and "InParent" not in record.FILTER:
record.add_filter("InParent")
if any(gt.data[key] is None for key in ("DP", "GQ", "AD")):
if not besenbacher:
record.add_filter("Besenbacher")
return False
if (
gt.data["DP"] < self.args.min_dp
or gt.data["DP"] > self.args.max_dp
or gt.data["GQ"] < self.args.min_gq
or gt.data["AD"][1] > self.args.max_ad2
):
if not besenbacher:
record.add_filter("Besenbacher")
besenbacher = True
return not besenbacher
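# Worked example of the thresholds above (illustration only, using the default
# command-line values min_dp=10, max_dp=120, min_gq=50, min_ab=0.3, max_ab=0.7,
# max_ad2=0): a GATK child call with DP=35, GQ=99 and AD=[20, 15] has an allele
# balance of 15 / 35 ~= 0.43 and passes; a parent call with AD=[30, 1] marks the
# variant as seen in a parent and, because 1 > max_ad2, also adds the "Besenbacher"
# filter.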
class SoftFilterProcessor(VcfProcessor):
"""VcfProcessor for the soft-annotation of filters.
The following annotation/soft-filtration will be performed:
``INFO/DeNovo``
List of individuals for which the variant is de novo based on their parent calls (both
        parent calls must be available).
``INFO/Neighbor``
List of individuals for which there is a de novo variant within 1kb (default) of the
current variant.
``INFO/NeighborMNV``
List of individuals for which there is a de novo variant within 20bp (default) of the
current variant.
``INFO/ClippedStack``
for de novo indels that presumably are caused by a BWA-MEM clipped read stack, require
clipping on both sides.
``INFO/HalfClippedStack``
for de novo indels that presumably are caused by a BWA-MEM clipped read stack, require
clipping on one side only.
``INFO/DeNovoOrigin``
If the variant haplotype of a de novo variant was found in mother/father/both, marks
the variant as maternal/paternal/inconsistent in the form of ``"${offspring}|${class}"``,
e.g. ``"child1|maternal,child2|inconsistent,child3|paternal"``.
``FILTER``
``Besenbacher`` if Besenbacher filter fails.
"""
def __init__(self, args):
super().__init__(args.input_vcf, args.output_vcf, args)
# Setup the logging.
self.offspring_bam_file = pysam.AlignmentFile(args.offspring_bam, "rb")
self._setup_logging()
# Load pedigree.
self.pedigree = self.load_pedigree()
self.counter = 0
self.steps = (
IsMnvAnnotationStep(self),
DeNovoAnnotationStep(self),
BesenbacherFilterAnnotationStep(self),
)
#: Helper for haplotype determination, will write back to this
#: processor.
self.haplo_proc = HaplotypeProcessor(self, self.args)
#: Helper for neighborhood determination, will write into
#: self.haplotype_proc.
self.neighbor_proc = NeighborhoodProcessor(self, self.haplo_proc)
#: (chrom, pos) of haplotype sweeping window start
self.haplo_sweep_chrom = None
self.haplo_sweep_pos = None
def _setup_logging(self):
logging.basicConfig(
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M"
)
logger = logging.getLogger("") # root logger
if self.args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
def load_pedigree(self):
logging.info("Loading pedigree...")
with open(self.args.input_ped, "rt") as inputf:
pedigree = Pedigree.load(inputf)
logging.info("Pedigree:\n%s", pedigree.to_string())
return pedigree
def augment_header(self, header):
"""Augment meta/header information in reader"""
# TODO: can we use pedigree from VCF in the future?
header = super().augment_header(header)
header = self.haplo_proc.augment_header(header)
header = self.neighbor_proc.augment_header(header)
header = self._add_infos(header)
return header
def _add_infos(self, header):
"""Add INFO header records"""
# Register the INFO headers that we will add.
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "ClippedStack"),
("Type", "Flag"),
("Number", 0),
(
"Description",
"Presumably caused by BWA-MEM clipped read stack for index sample (two-sided "
"clipping), ONLY ANNOTATED FOR INDEX",
),
]
)
)
header.add_info_line(
vcfpy.OrderedDict(
[
("ID", "HalfClippedStack"),
("Type", "Flag"),
("Number", 0),
(
"Description",
"Presumably caused by BWA-MEM clipped read stack for index sample (at least "
"one-sided clipping), ONLY ANNOTATED FOR INDEX",
),
]
)
)
# Register headers from sub steps.
for step in self.steps:
header = step.augment_header(header)
return header
def _add_pedigree(self, reader):
"""Add PEDIGREE header records, if not already present"""
if reader.metadata.get("PEDIGREE"):
return # information already there
# Extend META fields
# TODO(holtgrewe): Don't append already existing one
metas = [
collections.OrderedDict(
[
("ID", "Sex"),
("Type", "String"),
("Number", "1"),
# ('Values', 'UNKNOWN,MALE,FEMALE'),
]
),
collections.OrderedDict(
[
("ID", "Disease"),
("Type", "String"),
("Number", "1"),
# ('Values', 'UNKNOWN,UNAFFECTED,AFFECTED'),
]
),
collections.OrderedDict([("ID", "Father"), ("Type", "String"), ("Number", "1")]),
collections.OrderedDict([("ID", "Mother"), ("Type", "String"), ("Number", "1")]),
]
reader.metadata.setdefault("META", [])
reader.metadata["META"] += metas
# Write out PEDIGREE fields
entries = []
for entry in self.pedigree.entries:
val = collections.OrderedDict()
val["ID"] = entry.name
if entry.father != Individual.UNKNOWN:
val["Father"] = entry.father
if entry.mother != Individual.UNKNOWN:
val["Mother"] = entry.mother
if entry.gender == Individual.UNKNOWN:
val["Sex"] = "UNKNOWN"
elif entry.gender == Individual.MALE:
val["Sex"] = "MALE"
else:
val["Sex"] = "FEMALE"
if entry.affected == Individual.AFFECTED:
val["Affected"] = "AFFECTED"
elif entry.affected == Individual.UNAFFECTED:
val["Affected"] = "UNAFFECTED"
else:
val["Affected"] = "UNKNOWN"
entries.append(val)
reader.metadata["PEDIGREE"] = entries
def process_record(self, record):
for step in self.steps:
step.annotate(record)
# Add ClippedStack info for de novos
self._flag_read_stack(record)
# Put into neighborhood and haplotype processing pipeline
self.neighbor_proc.push(record)
def write(self, obj):
"""Write to output through super class process_record()"""
super().process_record(obj)
def done_processing(self):
self.neighbor_proc.finish()
self.writer.close()
def _flag_read_stack(self, record):
if self.args.index_name in record.INFO.get("DeNovo", []):
bam = self.offspring_bam_file
# Collect interesting BAM records that have clipping
RADIUS = 100
FUZZ = 10
bam_records = bam.fetch(record.CHROM, max(0, record.POS - RADIUS), record.POS + RADIUS)
regions = self._bam_records_to_clipped_regions(bam_records)
stacks = self._stack_regions(regions)
# print('RECORD', record, file=sys.stderr)
# print('STACKS', stacks, file=sys.stderr)
# import sys; print(stacks, file=sys.stderr)
for stack in stacks:
num_clipped, num_half_clipped = 0, 0
for region in stack:
# print('BOTH', region.is_both(), file=sys.stderr)
# print('LEFT', region.is_left(), file=sys.stderr)
# print('RIGHT', region.is_right(), file=sys.stderr)
if region.is_both():
num_half_clipped += 1
num_clipped += 1
elif region.is_left() and region.begin + FUZZ >= record.POS:
num_half_clipped += 1
elif region.is_right() and region.end < record.POS + FUZZ:
num_half_clipped += 1
# print('CLIPPING', num_half_clipped, num_clipped,
# file=sys.stderr)
# Add flag, depending on clipped count
MIN_CLIPPED = 3
if num_clipped >= MIN_CLIPPED:
record.INFO["ClippedStack"] = True
if num_half_clipped >= MIN_CLIPPED:
record.INFO["HalfClippedStack"] = True
def _stack_regions(self, clipped_regions):
"""Stack regions using a greedy algorithm"""
current = []
result = [current]
for region in clipped_regions:
if not current:
current.append(region)
elif region.jaccard(current[0]) > 0.5:
current.append(region)
else:
current = [region]
result.append(current)
if not result[-1]:
result.pop()
return result
def _bam_records_to_clipped_regions(self, bam_records):
"""Convert list of BAM records to ClippedRegion objects"""
MASK_IGNORE = (
pysam.FUNMAP | pysam.FSECONDARY | pysam.FQCFAIL | pysam.FDUP | pysam.FSUPPLEMENTARY
)
CLIPPING_THRESH = 20 # ignore smaller clippings
result = []
for bam_record in bam_records:
if bam_record.flag & MASK_IGNORE:
continue # skip unmapped
clippings = [
(o, l)
for (o, l) in bam_record.cigartuples
if o in (pysam.CSOFT_CLIP, pysam.CHARD_CLIP) and l > CLIPPING_THRESH
]
side = ClippedRegion.NONE
if len(clippings) == 2:
side = ClippedRegion.BOTH
elif len(clippings) == 1:
if bam_record.cigartuples[0][0] in (pysam.CSOFT_CLIP, pysam.CHARD_CLIP):
side = ClippedRegion.LEFT
else:
side = ClippedRegion.RIGHT
if side != ClippedRegion.NONE:
result.append(
ClippedRegion(
bam_record.reference_name, bam_record.pos, bam_record.reference_end, side
)
)
return result
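# Note on the read-stack heuristic above (added for clarity): clipped reads around a
# de novo call are grouped greedily by >50% Jaccard overlap; a stack of at least
# MIN_CLIPPED (3) reads clipped on both sides sets INFO/ClippedStack, while 3 reads
# clipped on at least one side near the variant position set INFO/HalfClippedStack.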
def run(args):
"""Program main entry point after parsing command line arguments."""
time_start = datetime.datetime.now()
processor = SoftFilterProcessor(args)
processor.run()
time_end = datetime.datetime.now()
spent = time_end - time_start
logging.info("Spent %.1f s in annotation", spent.total_seconds())
def main(argv=None):
"""Program's main entry point (before parsing command line arguments)."""
# Setup command line parser
parser = argparse.ArgumentParser(
description=(
"Annotate VCF file with various of soft filters for sequence variant de "
"novo filtration"
)
)
parser.add_argument(
"--verbose",
"-v",
dest="verbose",
default=False,
action="store_true",
help="Enable verbose logging",
)
group = parser.add_argument_group("Input / Output")
group.add_argument("--index-name", required=True, help="Name of expected index library")
group.add_argument("--input-vcf", required=True, help="Input VCF file")
group.add_argument("--input-ped", required=True, help="Input PED file")
group.add_argument("--output-vcf", required=True, help="Output VCF file (NOT bgziped)")
group.add_argument(
"--offspring-bam",
type=str,
required=True,
help="Path to BAM file for offspring, for read stack artifact filter of indels",
)
group.add_argument(
"--region",
dest="regions",
type=str,
action="append",
default=[],
nargs="+",
help="(List) of region(s) to process",
)
group.add_argument(
"--skip-invalid",
default=False,
action="store_true",
help="Ignore invalid VCF records, (default: false)",
)
group = parser.add_argument_group("INFO/FORMAT-based annotation (Besenbacher)")
group.add_argument(
"--min-gq", default=50, type=float, help="Minimal GQ quality for Besenbacher filter"
)
group.add_argument("--min-dp", default=10, type=int, help="Minimal DP for Besenbacher filter")
group.add_argument("--max-dp", default=120, type=int, help="Minimal DP for Besenbacher filter")
group.add_argument(
"--min-allele-balance",
dest="min_ab",
default=0.3,
type=float,
help="Minimal alelle balance value",
)
group.add_argument(
"--max-allele-balance",
dest="max_ab",
default=0.7,
type=float,
help="Maximal allele balance value",
)
group.add_argument(
"--max-ad2", default=0, type=int, help="Maximal alternative observation for parent"
)
group = parser.add_argument_group("Neighborhood-based filter")
group.add_argument(
"--exclusive-neighborhood",
type=int,
default=1000,
help="If there is more than one variant in such a neighborhood then flag it with Neighbor",
)
group.add_argument(
"--mnv-neighborhood",
type=int,
default=20,
help="Flag if there is more than one in this neighbourhood with NeighborMNV",
)
group = parser.add_argument_group("Phasing-based annotation")
group.add_argument(
"--use-phase-info",
action="store_true",
default=False,
help="Use phasing information, default is to not use phasing or haplotype information",
)
group.add_argument(
"--haplotype-window",
type=int,
default=100000,
help="Haplotype window size in bp, defaults to 100kbp",
)
group.add_argument(
"--phase-maternal-first",
default=False,
dest="phase_paternal_first",
action="store_true",
help="Phased data shows maternal allele first default is paternal allele first",
)
# Parse arguments, postprocess and kick-off program.
args = parser.parse_args(argv)
args.regions = [item for sublist in args.regions for item in sublist]
run(args)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_25716 |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
x_axix = [0,0.1,0.5,1,2,3]
cer = [66.12,63.07,69.03,63.65,63.46,69.54]
f1 = [47.61,50.69,45.88,50.5,52.38,46.01]
bleu1 = [41.73,50.24,34.09,46.98,45.12,30.18]
bleu2 = [32.8,39,27.1,36.83,36.13,24.51]
unigram = [3.2,2.9,3.5,3,2.9,2.9]
bigram = [40.1,41.8,39.3,40.9,35.2,29.7]
plt.title('α Value Influence')
plt.plot(x_axix, cer, color='green', label='CER',linestyle=':', linewidth=1,
marker='o', markersize=5,
markeredgecolor='black', markerfacecolor='C0')
plt.plot(x_axix, bleu1, color='red', label='Bleu-1',linestyle='--', linewidth=1,
marker=',', markersize=5,
markeredgecolor='black', markerfacecolor='C3')
plt.plot(x_axix, f1, color='skyblue', label='F1 Score',linestyle='-.', linewidth=1,
marker='v', markersize=5,
markeredgecolor='black', markerfacecolor='C2')
plt.plot(x_axix, bleu2, label='Bleu-2',
linestyle=':', linewidth=1,
marker='^', markersize=5,
markeredgecolor='black', markerfacecolor='C1'
)
plt.plot(x_axix, unigram, color='magenta', label='Unigrams Distinct',linestyle=':', linewidth=1,
marker='>', markersize=5,
markeredgecolor='black', markerfacecolor='C5')
plt.plot(x_axix, bigram, color='blue', label='Bigrams Distinct',linestyle=':', linewidth=1,
marker='<', markersize=5,
markeredgecolor='black', markerfacecolor='C5')
plt.legend()  # show the legend
plt.xlabel('α Value in Loss')
plt.ylabel('rate(%)')
plt.show()
|
the-stack_106_25717 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import unittest
from collections import defaultdict
from chinese_calendar.constants import holidays, workdays
class HolidayAmountTests(unittest.TestCase):
def test_holiday_amount(self):
holiday_amounts = defaultdict(int)
for date in holidays.keys():
if date.weekday() <= 4:
holiday_amounts[date.year] += 1
for date in workdays.keys():
if date.weekday() > 4:
holiday_amounts[date.year] -= 1
        holiday_amounts[2007] -= 2  # 2007 had 13 statutory holiday days (two extra for National Day)
        holiday_amounts[2008] -= 2  # same applies to 2008
        holiday_amounts[2011] += 1  # 2011 includes a make-up workday for the 2012 New Year holiday
        holiday_amounts[2012] -= 1  # 2012 benefits from the day made up in 2011
        holiday_amounts[2015] -= 1  # 2015 had one extra day for the 70th anniversary of the WWII victory
        for year in range(2007, 2018 + 1):  # 2006 has too little data, so it is not tested
self.assertEqual(11, holiday_amounts[year], 'Holiday amount of year {}'.format(year))
self.assertEqual(1, 1)
|
the-stack_106_25718 | #!/usr/bin/env python
# This shows how to leverage the endpoints API to get a new hidden
# service up and running quickly. You can pass along this API to your
# users by accepting endpoint strings as per Twisted recommendations.
#
# http://twistedmatrix.com/documents/current/core/howto/endpoints.html#maximizing-the-return-on-your-endpoint-investment
#
# note that only the progress-updates needs the "import txtorcon" --
# you do still need it installed so that Twisted finds the endpoint
# parser plugin but code without knowledge of txtorcon can still
# launch a Tor instance using it. cool!
from __future__ import print_function
from twisted.internet import defer, task, endpoints
from twisted.web import server, resource
import txtorcon
class Simple(resource.Resource):
"""
A really simple Web site.
"""
isLeaf = True
def render_GET(self, request):
return b"<html>Hello, world! I'm a prop224 Onion Service!</html>"
@defer.inlineCallbacks
def main(reactor):
tor = yield txtorcon.connect(
reactor,
endpoints.TCP4ClientEndpoint(reactor, "localhost", 9251),
)
print("{}".format(tor))
hs = yield tor.create_filesystem_onion_service(
[(80, 8787)],
"./prop224_hs",
version=3,
)
print("{}".format(hs))
print(dir(hs))
ep = endpoints.TCP4ServerEndpoint(reactor, 8787, interface="localhost")
yield ep.listen(server.Site(Simple())) # returns 'port'
print("Site listening: {}".format(hs.hostname))
print("Private key:\n{}".format(hs.private_key))
yield defer.Deferred() # wait forever
task.react(main)
|
the-stack_106_25719 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
def is_leap(year):
"""
    Given a year, return True if it is a leap year, otherwise False.
"""
try:
year = int(year)
except Exception as err:
print(err)
else:
is_leap = year % 4 == 0 and year % 100 != 0 or \
year % 400 == 0
return is_leap
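# Examples (for illustration): is_leap(2000) -> True (divisible by 400),
# is_leap(1900) -> False (divisible by 100 but not by 400), is_leap(2024) -> True.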
if __name__ == '__main__':
    year = input('Please enter a year: ')
res = is_leap(year)
    print(f'Is {year} a leap year: {res}')
|
the-stack_106_25723 | import os
import django
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# the login url
LOGIN_URL='/login'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media')
# Absolute path to the directory that holds cloned repositories.
REPO_ROOT = os.path.join(SITE_ROOT, 'clones')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/site-media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# append slashes to URLs
APPEND_SLASH = True
# Path where screenshots and thumbnails should be placed
SCREENSHOT_PATH = os.path.join(MEDIA_ROOT, 'screenshots')
# Root URL for screenshots
SCREENSHOT_URL = "/site-media/screenshots/"
# The maximum number of threads to use when fetching blogs
BLOG_FETCH_PROCESS_COUNT = 10
# The maximum number of concurrent processes to run when fetching repos
REPO_FETCH_PROCESS_COUNT = 4
# The number of minutes before a repository fetch should timeout.
#
# This doesn't apply to the time it takes the clone the repository, just to
# the amount of time it takes to read the logs and generate diffs.
# "Some People" commit massive diffs, other than that this should never be
# an issue unless your computer is very, very slow.
REPO_FETCH_TIMEOUT = 1
# scoring thresholds
GREEN_SCORE = 2880 # everything up to this score will be green
RED_SCORE = 172800 # everything after this score will be red
UNCERTAIN_SCORE = 10000 # everything after this score will be uncertain face
UNHAPPY_SCORE = 86400 # everything after this score will be unhappy face
# the "worst" score allowed, in minutes
MAX_SCORE_MINUTES = 3024000
# The web address that observatory is hosted on
DOMAIN_NAME = "http://localhost:8000"
# The title prepended to any RSS feeds
FEED_TITLE = "Observatory"
# The number of items that should appear in dashboard-wide feeds
FEED_COUNT = 30
# The number of items that should appear in project-specific feeds
PROJECT_FEED_COUNT = 10
# use production jquery for production, debug for debug
JQUERY = [os.path.join(MEDIA_URL, "js", "jquery-1.4.4.min.js"),
os.path.join(MEDIA_URL, "js", "jquery-1.4.4.js")][DEBUG]
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'j+e*h2ket2cf2w##m2fzjp392%68!a^xcjo+_lr_-(^d8c3ea5'
# if True, attempts to use the devserver replacement for runserver in debug
# https://github.com/dcramer/django-devserver
TRY_DEVSERVER = False
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'observatory.middleware.CssSmasher'
)
ROOT_URLCONF = 'observatory.urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates')
)
INSTALLED_APPS = (
'devserver',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'dashboard',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
)
# import recaptcha keys
try:
from settings_recaptcha import RECAPTCHA_PUBLIC, RECAPTCHA_PRIVATE
except:
RECAPTCHA_PUBLIC = None
RECAPTCHA_PRIVATE = None
# automatically use devserver if it is available and requested
USE_DEVSERVER = False
if DEBUG and TRY_DEVSERVER:
try:
import devserver
USE_DEVSERVER = True
except:
pass
if not USE_DEVSERVER:
INSTALLED_APPS = INSTALLED_APPS[1:]
DEVSERVER_MODULES = (
'devserver.modules.sql.SQLSummaryModule',
)
# Client side files
CSS_FILES=[
"css/author-requests.css",
"css/base.css",
"css/basics.css",
"css/buttons.css",
"css/decorations.css",
"css/diffs.css",
"css/events.css",
"css/forms.css",
"css/modify.css",
"css/profiles.css",
"css/projects.css",
]
JS_FILES=[
"js/lib/vendor/jquery-1.4.4.js",
"js/lib/vendor/jquery.html5form-1.2.js",
"js/globals.js",
"js/lib/lightbox.js",
# forms
"js/lib/form/Form.js",
"js/lib/form/ExclusiveOrForms.js",
# pages
"js/lib/page/Page.js",
"js/lib/page/LoginRegisterPage.js",
"js/lib/page/AddProjectPage.js",
"js/lib/page/ShowProjectPage.js",
"js/lib/page/ModifyProjectPage.js",
"js/init.js"
]
## Page header
HEADER_TEMPLATE = 'header.html'
## Favicon
FAVICON_PATH = '/site-media/rcos/favicon.ico'
|
the-stack_106_25726 | from logging import getLogger
import tkinter as tk
import traceback
from typing import Optional, Tuple, List
from thonny import get_workbench
from thonny.codeview import SyntaxText, CodeViewText, get_syntax_options_for_tag
from thonny.common import SignatureInfo, SignatureParameter
from thonny.editors import Editor
from thonny.misc_utils import running_on_mac_os
from thonny.shell import ShellText
from thonny.tktextext import TextFrame
from thonny.ui_utils import get_tk_version_info
all_boxes = []
a_box_is_appearing = False
logger = getLogger(__name__)
class EditorInfoBox(tk.Toplevel):
def __init__(self):
super().__init__(master=get_workbench())
self._has_shown_on_screen: bool = False
all_boxes.append(self)
self._target_text_widget: Optional[SyntaxText] = None
get_workbench().bind("<FocusOut>", self._workbench_focus_out, True)
# If the box has received focus, then it may lose it by a messagebox
# or mouse click on the main window
self.bind("<FocusOut>", self._workbench_focus_out, True)
get_workbench().bind("<Escape>", self.hide, True)
self.bind("<Escape>", self.hide, True)
get_workbench().bind_class("EditorCodeViewText", "<1>", self.hide, True)
get_workbench().bind_class("ShellText", "<1>", self.hide, True)
get_workbench().bind("SyntaxThemeChanged", self._update_theme, True)
def _set_window_attributes(self):
if running_on_mac_os():
try:
# Must be the first thing to do after creating window
# https://wiki.tcl-lang.org/page/MacWindowStyle
self.tk.call(
"::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
)
if get_tk_version_info() >= (8, 6, 10) and running_on_mac_os():
self.wm_overrideredirect(1)
except tk.TclError:
pass
else:
self.wm_overrideredirect(1)
self.wm_transient(get_workbench())
# From IDLE
# TODO: self.update_idletasks() # Need for tk8.6.8 on macOS: #40128.
self.lift()
def _update_theme(self, event=None):
pass
def _check_bind_for_keypress(self, text: tk.Text):
tag_prefix = "pb_" + type(self).__name__.replace(".", "_")
if getattr(text, tag_prefix, False):
return False
# Need to know about certain keypresses while the completer is active
# Other plugins (eg. auto indenter) may eat up returns, therefore I need to
# raise the priority of this binding
tag = tag_prefix + "_" + str(text.winfo_id())
text.bindtags((tag,) + text.bindtags())
text.bind_class(tag, "<Key>", self._on_text_keypress, True)
setattr(text, tag_prefix, True)
def _on_text_keypress(self, event=None):
pass
def _workbench_focus_out(self, event=None) -> None:
if not self.is_visible():
return
# if a_box_is_appearing:
# making a box appear may mess up FocusOut events
# return
# Need to close when another app or a Thonny's dialog appears
        # (otherwise the box will float above this, at least in Linux).
# Don't do anything if another EditorInfoBox appears
for box in all_boxes:
try:
# need to try because asking for focus via wrong window may give exception
if box.focus_get():
# it's alright
return
except:
pass
self.hide()
def _get_related_box(self) -> Optional["EditorInfoBox"]:
return None
def _show_on_target_text(
self,
index: str,
expected_box_height: int,
preferred_position: str,
y_offset: int = 0,
) -> None:
text = self._target_text_widget
bbox = text.bbox(index)
if not bbox:
logger.warning("Could not compute bbox")
return
text_box_x, text_box_y, _, text_box_height = bbox
cursor_root_x = text.winfo_rootx() + text_box_x
cursor_root_y = text.winfo_rooty() + text_box_y
if (
preferred_position == "below"
and cursor_root_y + text_box_height + expected_box_height > text.winfo_screenheight()
):
position = "above"
else:
position = preferred_position
if position == "above":
# negative value signifies pixels between window bottom and screen bottom
y = cursor_root_y - text.winfo_screenheight()
else:
y = cursor_root_y + text_box_height
# TODO reduce x if the box wouldn't fit by width
x = cursor_root_x
self._show_on_screen(x, y + y_offset)
def _show_on_screen(self, x: int, y: int) -> None:
global a_box_is_appearing
assert not a_box_is_appearing
try:
a_box_is_appearing = True
if y < 0:
self.geometry("+%d-%d" % (x, -y))
else:
self.geometry("+%d+%d" % (x, y))
if not self.winfo_ismapped():
self._set_window_attributes()
self._check_update_size()
self.deiconify()
if not self._has_shown_on_screen:
self.tweak_first_appearance()
else:
self._check_update_size()
finally:
a_box_is_appearing = False
self._has_shown_on_screen = True
def _check_update_size(self) -> None:
if hasattr(self, "_update_size"):
# It looks it's not worth trying to move the window away from the viewport
# for calculations. At least in Ubuntu it doesn't give any advantages and
# may produce glitches
# self.geometry("+10000+5000") # move away from visible screen
# self.withdraw()
self.update() # gives proper data for size calculations
self._update_size()
self.update() # applies updated size
def hide(self, event: Optional[tk.Event] = None) -> None:
if self.winfo_ismapped():
self.withdraw()
related_box = self._get_related_box()
if related_box and related_box.is_visible():
related_box.hide(event)
# Following looks like a good idea, but in at least in Ubuntu, it would fix
# entry cursor to the given text and it can't be moved to another text anymore
# if self._target_text_widget:
# self._target_text_widget.focus_set() # in case the user has clicked on the box
def is_visible(self) -> bool:
return self.winfo_ismapped()
def tweak_first_appearance(self):
pass
class DocuBoxBase(EditorInfoBox):
def __init__(self, show_vertical_scrollbar: bool):
super().__init__()
self.text_frame = TextFrame(
master=self,
horizontal_scrollbar=False,
vertical_scrollbar=show_vertical_scrollbar,
read_only=True,
height=7,
width=40,
font="TkDefaultFont",
wrap="word",
)
self.text_frame.grid()
self.text = self.text_frame.text
self._update_theme()
def _update_theme(self, event=None):
super()._update_theme(event)
comment_opts = get_syntax_options_for_tag("comment")
gutter_opts = get_syntax_options_for_tag("GUTTER")
text_opts = get_syntax_options_for_tag("TEXT")
self.text["background"] = gutter_opts["background"]
self.text["foreground"] = text_opts["foreground"]
self.text.tag_configure("prose", font="TkDefaultFont")
self.text.tag_configure("active", font="BoldTkDefaultFont")
self.text.tag_configure("annotation", **comment_opts)
self.text.tag_configure("default", **comment_opts)
self.text.tag_configure("marker", **comment_opts)
def _append_chars(self, chars, tags=()):
self.text.direct_insert("end", chars, tags=tuple(tags))
def render_signatures(self, signatures: List[SignatureInfo], only_params=False) -> None:
for i, sig in enumerate(signatures):
if i > 0:
self._append_chars("\n")
self.render_signature(sig, only_params)
def render_signature(self, sig: SignatureInfo, only_params) -> None:
if not only_params:
self._append_chars(sig.name)
self._append_chars("(")
is_positional = False
is_kw_only = False
for i, param in enumerate(sig.params):
if i > 0:
self._append_chars(", ")
if len(sig.params) > 20:
self._append_chars("\n ")
is_positional |= param.kind == "POSITIONAL_ONLY"
if is_positional and param.kind != "POSITIONAL_ONLY":
self._append_chars("/, ", ["marker"])
is_positional = False
if param.kind == "VAR_POSITIONAL":
is_kw_only = True
elif param.kind == "KEYWORD_ONLY" and not is_kw_only:
self._append_chars("*, ", ["marker"])
is_kw_only = True
is_active_parameter = sig.current_param_index == i
self.render_parameter(param, is_active_parameter)
if is_positional:
self._append_chars(", /", ["marker"])
self._append_chars(")")
if sig.return_type and not only_params:
self._append_chars(" -> ", ["marker"])
self._append_chars(sig.return_type, ["annotation"])
def render_parameter(self, param: SignatureParameter, active: bool) -> None:
if active:
base_tags = ["active"]
else:
base_tags = []
if param.kind == "VAR_POSITIONAL":
self._append_chars("*", base_tags)
elif param.kind == "VAR_KEYWORD":
self._append_chars("**", base_tags)
self._append_chars(param.name, base_tags)
if param.annotation:
self._append_chars(":\u00A0" + param.annotation, base_tags + ["annotation"])
if param.default:
self._append_chars("=" + param.default, base_tags + ["default"])
def format_signature(self, s: str) -> str:
s = s.replace(": ", ":\u00A0")
if len(s) > self.text["width"] * 1.8 and s.count("(") and s.count(")"):
args_index = s.index("(") + 1
suffix_index = s.rindex(")")
prefix = s[:args_index]
args = s[args_index:suffix_index].split(", ")
suffix = s[suffix_index:]
s = prefix + "\n " + ",\n ".join(args) + "\n" + suffix
# don't keep / and * alone on a line
s = (
s.replace("\n /,", " /,")
.replace("\n *,", " *,")
.replace("\n /\n)", " /\n)")
.replace("\n *\n)", " *\n)")
)
return s
else:
return s
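# Example of the wrapping above (illustration only): a long signature such as
# "f(alpha: int, beta: str = 'x', *, gamma: float = 0.0) -> None" first gets
# non-breaking spaces after the colons; if it exceeds roughly 1.8 times the
# configured text width, it is re-rendered with one argument per indented line
# between "f(" and ")".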
class DocuBox(DocuBoxBase):
def __init__(self):
super().__init__(show_vertical_scrollbar=True)
def set_content(self, name, item_type, signatures, docstring):
self.text.direct_delete("1.0", "end")
# self._append_chars(item_type + "\n")
if signatures:
self.render_signatures(signatures)
if signatures and docstring:
self._append_chars("\n\n")
if docstring:
self._append_chars(docstring, ["prose"])
def get_active_text_widget() -> Optional[SyntaxText]:
widget = get_workbench().focus_get()
if isinstance(widget, (CodeViewText, ShellText)):
return widget
return None
def get_cursor_position(text: SyntaxText) -> Tuple[int, int]:
parts = text.index("insert").split(".")
return int(parts[0]), int(parts[1])
def get_text_filename(text: SyntaxText) -> Optional[str]:
if isinstance(text, ShellText):
return "<Shell>"
elif isinstance(text, CodeViewText):
editor = getattr(text.master, "home_widget")
if isinstance(editor, Editor):
return editor.get_filename()
return None
def get_relevant_source_and_cursor_position(text: SyntaxText) -> Tuple[str, int, int]:
if isinstance(text, ShellText):
source = text.get("input_start", "insert")
lines = source.splitlines()
if not lines:
return source, 1, 0
else:
return source, len(lines), len(lines[-1])
else:
row, col = get_cursor_position(text)
return text.get("1.0", "end-1c"), row, col
|
the-stack_106_25728 | import serial
import logging
from . import EELS_controller
from nion.swift.model import HardwareSource
import socket
__author__ = "Yves Auad"
class EELS_Spectrometer(EELS_controller.EELSController):
def __init__(self, sport):
super().__init__()
self.success = False
self.serial_success = False
self.vsm_success = False
self.ser = serial.Serial()
self.ser.baudrate = 9600
self.ser.port = sport
self.ser.parity = serial.PARITY_NONE
self.ser.stopbits = serial.STOPBITS_ONE
self.ser.bytesize = serial.EIGHTBITS
self.ser.timeout = 0.2
try:
if not self.ser.is_open:
self.ser.open()
self.serial_success = True
except:
logging.info("***EELS SPECTROMETER***: Could not find EELS Spec. Please check hardware. "
"Entering in debug mode.")
if self.serial_success:
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(0.1)
self.sock.connect(("129.175.82.70", 80))
self.vsm_success = True
except socket.timeout:
logging.info("***EELS SPECTROMETER***: Could not find VSM. Please check hardware. "
"Entering in debug mode.")
self.success = self.serial_success
def set_val(self, val, which):
if which=="off":
if not self.vsm_success: return
if val<=1000 and val>=0:
veff = int(val * 4.095)
plus = ('HV+ ' + str(veff) + '\n').encode()
self.sock.sendall(plus)
else:
logging.info('***EELS***: VSM value too high or negative. Current maximum value is 1000 V.')
#scan = HardwareSource.HardwareSourceManager().get_hardware_source_for_hardware_source_id("orsay_scan_device")
#if scan is not None:
# scan.scan_device.orsayscan.drift_tube = float(val)
else:
if which=="dmx": which="al"
if abs(val)<32767:
try:
if val < 0:
val = abs(val)
else:
val = 0xffff - val
string = which + ' 0,' + hex(val)[2:6] + '\r'
self.ser.write(string.encode())
return self.ser.read(6)
except:
logging.info(
"***EELS SPECTROMETER***: Problem communicating over serial port. Easy check using Serial Port Monitor.")
else:
logging.info("***EELS SPECTROMETER***: Attempt to write a value out of range.")
|
the-stack_106_25730 | import numpy as np
from . import rans
from utils.distributions import discretized_logistic_cdf, \
mixture_discretized_logistic_cdf
import torch
precision = 24
n_bins = 4096
def cdf_fn(z, pz, variable_type, distribution_type, inverse_bin_width):
if variable_type == 'discrete':
if distribution_type == 'logistic':
if len(pz) == 2:
return discretized_logistic_cdf(
z, *pz, inverse_bin_width=inverse_bin_width)
elif len(pz) == 3:
return mixture_discretized_logistic_cdf(
z, *pz, inverse_bin_width=inverse_bin_width)
elif distribution_type == 'normal':
pass
elif variable_type == 'continuous':
if distribution_type == 'logistic':
pass
elif distribution_type == 'normal':
pass
elif distribution_type == 'steplogistic':
pass
raise ValueError
def CDF_fn(pz, bin_width, variable_type, distribution_type):
mean = pz[0] if len(pz) == 2 else pz[0][..., (pz[0].size(-1) - 1) // 2]
MEAN = torch.round(mean / bin_width).long()
bin_locations = torch.arange(-n_bins // 2, n_bins // 2)[None, None, None, None, :] + MEAN.cpu()[..., None]
bin_locations = bin_locations.float() * bin_width
bin_locations = bin_locations.to(device=pz[0].device)
pz = [param[:, :, :, :, None] for param in pz]
cdf = cdf_fn(
bin_locations - bin_width,
pz,
variable_type,
distribution_type,
1./bin_width).cpu().numpy()
# Compute CDFs, reweigh to give all bins at least
# 1 / (2^precision) probability.
# CDF is equal to floor[cdf * (2^precision - n_bins)] + range(n_bins)
CDFs = (cdf * ((1 << precision) - n_bins)).astype('int') \
+ np.arange(n_bins)
return CDFs, MEAN
def encode_sample(
z, pz, variable_type, distribution_type, bin_width=1./256, state=None):
if state is None:
state = rans.x_init
else:
state = rans.unflatten(state)
CDFs, MEAN = CDF_fn(pz, bin_width, variable_type, distribution_type)
# z is transformed to Z to match the indices for the CDFs array
Z = torch.round(z / bin_width).long() + n_bins // 2 - MEAN
Z = Z.cpu().numpy()
if not ((np.sum(Z < 0) == 0 and np.sum(Z >= n_bins-1) == 0)):
print('Z out of allowed range of values, canceling compression')
return None
Z, CDFs = Z.reshape(-1), CDFs.reshape(-1, n_bins).copy()
for symbol, cdf in zip(Z[::-1], CDFs[::-1]):
statfun = statfun_encode(cdf)
state = rans.append_symbol(statfun, precision)(state, symbol)
state = rans.flatten(state)
return state
def decode_sample(
state, pz, variable_type, distribution_type, bin_width=1./256):
state = rans.unflatten(state)
device = pz[0].device
size = pz[0].size()[0:4]
CDFs, MEAN = CDF_fn(pz, bin_width, variable_type, distribution_type)
CDFs = CDFs.reshape(-1, n_bins)
result = np.zeros(len(CDFs), dtype=int)
for i, cdf in enumerate(CDFs):
statfun = statfun_decode(cdf)
state, symbol = rans.pop_symbol(statfun, precision)(state)
result[i] = symbol
Z_flat = torch.from_numpy(result).to(device)
Z = Z_flat.view(size) - n_bins // 2 + MEAN
z = Z.float() * bin_width
state = rans.flatten(state)
return state, z
def statfun_encode(CDF):
def _statfun_encode(symbol):
return CDF[symbol], CDF[symbol + 1] - CDF[symbol]
return _statfun_encode
def statfun_decode(CDF):
def _statfun_decode(cf):
# Search such that CDF[s] <= cf < CDF[s]
s = np.searchsorted(CDF, cf, side='right')
s = s - 1
start, freq = statfun_encode(CDF)(s)
return s, (start, freq)
return _statfun_decode
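# Round-trip sketch (illustration only; ``pz`` stands for the distribution parameters
# produced by the model, e.g. (mean, log_scale) tensors of shape [B, C, H, W]):
#
#     state = encode_sample(z, pz, 'discrete', 'logistic', bin_width=1. / 256)
#     state, z_decoded = decode_sample(state, pz, 'discrete', 'logistic', 1. / 256)
#     # z_decoded should reproduce z (up to bin quantization), provided z stays
#     # within the n_bins-wide window around the predicted mean.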
def encode(x, symbol):
return rans.append_symbol(statfun_encode, precision)(x, symbol)
def decode(x):
return rans.pop_symbol(statfun_decode, precision)(x)
|
the-stack_106_25732 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import ubelt as ub
def read_tensorboard_scalars(train_dpath, verbose=1, cache=1):
"""
Reads all tensorboard scalar events in a directory.
    Caches them because reading events of interest from protobuf can be slow.
"""
import glob
from os.path import join
try:
from tensorboard.backend.event_processing import event_accumulator
except ImportError:
raise ImportError('tensorboard/tensorflow is not installed')
event_paths = sorted(glob.glob(join(train_dpath, 'events.out.tfevents*')))
    # make a hash so we will re-read if we need to
cfgstr = ub.hash_data(list(map(ub.hash_file, event_paths))) if cache else ''
cacher = ub.Cacher('tb_scalars', cfgstr=cfgstr, enabled=cache,
dpath=join(train_dpath, '_cache'))
datas = cacher.tryload()
if datas is None:
datas = {}
for p in ub.ProgIter(list(reversed(event_paths)), desc='read tensorboard', enabled=verbose):
ea = event_accumulator.EventAccumulator(p)
ea.Reload()
for key in ea.scalars.Keys():
if key not in datas:
datas[key] = {'xdata': [], 'ydata': [], 'wall': []}
subdatas = datas[key]
events = ea.scalars.Items(key)
for e in events:
subdatas['xdata'].append(int(e.step))
subdatas['ydata'].append(float(e.value))
subdatas['wall'].append(float(e.wall_time))
# Order all information by its wall time
for key, subdatas in datas.items():
sortx = ub.argsort(subdatas['wall'])
for d, vals in subdatas.items():
subdatas[d] = list(ub.take(vals, sortx))
cacher.save(datas)
return datas
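# Usage sketch (illustration only; the run directory and the 'loss' tag are hypothetical):
#
#     datas = read_tensorboard_scalars('runs/my_experiment')
#     datas['loss']['xdata']  # step numbers, ordered by wall time
#     datas['loss']['ydata']  # corresponding scalar values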
|
the-stack_106_25735 | import yaml
import logging
import requests
import os
REL_PATH = os.path.realpath(__file__).rsplit('/', 1)[0]
class MirrorConfig:
"""Class that contains config for crypto-mirror UI and stuff"""
def __init__(self, *args, **kwargs):
self.__dict__.update(kwargs)
self.validate_token()
def validate_token(self):
key_manager = KeyManager(self.weather_key_path)
key_manager.fetch_keys()
self.weather_api_token = key_manager.api_key
@classmethod
def from_yaml(cls, config_path):
with open(config_path, 'r') as cfh:
            tmp_cfg = yaml.safe_load(cfh)
tmp_cfg['config_path'] = config_path
return cls(**tmp_cfg)
class KeyManager:
def __init__(self, api_key=None, key_path=f'{REL_PATH}/assets/keys.yml', key_status=False):
self.key_path = key_path
self.key_status = key_status
self.api_key = api_key
self.fetch_keys()
def fetch_keys(self):
if os.path.exists(self.key_path):
with open(self.key_path) as kfh:
                key_dict = yaml.safe_load(kfh)
self.api_key = key_dict['darksky_token'] if key_dict else ''
else:
# assume that file doesnt exist, create it
self.add_key()
def add_key(self):
"""Prompts user for darksky key, creates key_file if valid"""
        new_key = input("It doesn't look like you have any API keys yet. "
'Create one at darksky.net/dev, then paste it here: ')
if new_key:
valid = self.key_test(new_key)
if valid:
# Set key and key status
self.key_status = True
self.api_key = new_key
# write new key to file
try:
key_dict = {'darksky_token': new_key}
with open(self.key_path, 'w') as cfh:
cfh.write(f'darksky_token: {new_key}')
print('Key file created successfully!')
except Exception as e:
print(f'Failed to create key file: {e}')
else:
print('Invalid key')
@staticmethod
def key_test(key, exp_return=200):
"""Makes a test call to darksky api"""
resp = requests.get(f'https://api.darksky.net/forecast/{key}/38,-77?lang=en&units=us')
return True if resp.status_code == exp_return else False
def curr_fmt(price):
return round(float(price),2) |
the-stack_106_25737 | import os
import albumentations as A
abs_path = os.path.dirname(__file__)
args = {
'model_path': '/root/gld_pd/models/',
'data_path': '/root/snacks_data/5/',
'data_path_2019': '/root/snacks_data/5/',
'valid_csv_fn': 'test_filtered.csv',
'train_csv_fn': 'train_filtered.csv',
'gpus': '0',
'filter_warnings': True,
'logger': 'tensorboard',
'num_sanity_val_steps': 0,
'distributed_backend': 'ddp',
'channels_last': False,
'gradient_accumulation_steps': 2,
'precision': 16,
'sync_batchnorm': False,
'seed': 1138,
'num_workers': 0,
'save_weights_only': True,
'p_trainable': True,
'resume_from_checkpoint': None,
'pretrained_weights': None,
'normalization': 'imagenet',
'crop_size': 448,
'backbone': 'gluon_seresnext101_32x4d',
'embedding_size': 512,
'pool': 'gem',
'arcface_s': 45,
'arcface_m': 0.4,
'neck': 'option-D',
'head': 'arc_margin',
'crit': "bce",
'loss': 'arcface',
# 'focal_loss_gamma': 2,
'class_weights': "log",
'class_weights_norm': 'batch',
'optimizer': "sgd",
'weight_decay': 1e-4,
'lr': 0.005,
'batch_size': 32,
'test_batch_size': 240,
'max_epochs': 6,
'scheduler': {"method": "cosine", "warmup_epochs": 1},
'n_classes': 83,
'data_frac': 1.,
'neptune_project': 'xx/kaggle-landmark',
}
args['tr_aug'] = A.Compose([
# A.Resize(height=448, width=448),
A.SmallestMaxSize(512),
A.RandomCrop(height=args['crop_size'], width=args['crop_size'], p=1.),
A.HorizontalFlip(p=0.5),
])
args['val_aug'] = A.Compose([
# A.Resize(height=448, width=448),
A.SmallestMaxSize(512),
A.CenterCrop(height=args['crop_size'], width=args['crop_size'], p=1.)
])
args['test_aug'] = A.Compose([
A.Resize(height=448, width=448),
# A.SmallestMaxSize(512),
# A.CenterCrop(height=args['crop_size'], width=args['crop_size'], p=1.)
])
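# Note: these albumentations pipelines are applied per-sample elsewhere in the
# training code, roughly like the (hypothetical) call below:
#
#   augmented_image = args['tr_aug'](image=image)['image']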
|
the-stack_106_25740 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import time
import uuid
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import force_text
from . import codex
from . import exceptions
from . import saml2idp_metadata
from . import xml_render
from .request import RequestProcessor
MINUTES = 60
HOURS = 60 * MINUTES
def get_random_id():
# It is very important that these random IDs NOT start with a number.
random_id = '_' + uuid.uuid4().hex
return random_id
def get_time_string(delta=0):
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(time.time() + delta))
# Design note: I've tried to make this easy to sub-class and override
# just the bits you need to override. I've made use of object properties,
# so that your sub-classes have access to all information: use wisely.
# Formatting note: These methods are alphabetized.
class Processor(object):
"""
Base SAML 2.0 AuthnRequest to Response Processor.
Sub-classes should provide Service Provider-specific functionality.
"""
@property
def dotted_path(self):
return '{module}.{class_name}'.format(
module=self.__module__,
class_name=self.__class__.__name__)
def __init__(self, config, name=None):
self.name = name
self._config = config.copy()
self._logger = logging.getLogger(__name__)
processor_path = self._config.get('processor', 'invalid')
self._logger.info('initializing processor {} {}'.format(processor_path, self.dotted_path))
if processor_path != self.dotted_path:
raise ImproperlyConfigured(
"config is invalid for this processor: {}".format(self._config))
if 'acs_url' not in self._config:
raise ImproperlyConfigured(
"no ACS URL specified in SP configuration: {}".format(
self._config))
self._logger.info('processor configured: {}'.format(str(self._config)))
def _build_assertion(self):
"""
Builds _assertion_params.
"""
self._determine_assertion_id()
self._determine_audience()
self._determine_subject()
self._determine_session_index()
self._assertion_params = {
'ASSERTION_ID': self._assertion_id,
'ASSERTION_SIGNATURE': '', # it's unsigned
'AUDIENCE': self._audience,
'AUTH_INSTANT': get_time_string(),
'ISSUE_INSTANT': get_time_string(),
'NOT_BEFORE': get_time_string(-1 * HOURS), # TODO: Make these settings.
'NOT_ON_OR_AFTER': get_time_string(15 * MINUTES),
'SESSION_INDEX': self._session_index,
'SESSION_NOT_ON_OR_AFTER': get_time_string(8 * HOURS),
'SP_NAME_QUALIFIER': self._audience,
'SUBJECT': self._subject,
'SUBJECT_FORMAT': self._subject_format,
}
self._assertion_params.update(self._system_params)
self._assertion_params.update(self._request_params)
def _build_response(self):
"""
Builds _response_params.
"""
self._determine_response_id()
self._response_params = {
'ISSUE_INSTANT': get_time_string(),
'RESPONSE_ID': self._response_id,
'RESPONSE_SIGNATURE': '', # initially unsigned
}
self._response_params.update(self._system_params)
self._response_params.update(self._request_params)
def _determine_assertion_id(self):
"""
Determines the _assertion_id.
"""
self._assertion_id = get_random_id()
def _determine_audience(self):
"""
Determines the _audience.
"""
self._audience = self._request_params.get('DESTINATION', None)
if not self._audience:
self._audience = self._request_params.get('PROVIDER_NAME', None)
self._logger.info('determined audience: {}'.format(self._audience))
def _determine_response_id(self):
"""
Determines _response_id.
"""
self._response_id = get_random_id()
def _determine_session_index(self):
self._session_index = self._django_request.session.session_key
def _determine_subject(self):
"""
Determines _subject and _subject_type for Assertion Subject.
"""
self._subject = self._django_request.user.email
def _encode_response(self):
"""
Encodes _response_xml to _encoded_xml.
"""
self._saml_response = codex.nice64(self._response_xml)
def _extract_saml_request(self):
"""
Retrieves the _saml_request AuthnRequest from the _django_request.
"""
self._saml_request = self._django_request.session['SAMLRequest']
self._relay_state = self._django_request.session['RelayState']
def _format_assertion(self):
"""
Formats _assertion_params as _assertion_xml.
"""
        raise NotImplementedError()
def _format_response(self):
"""
Formats _response_params as _response_xml.
"""
sign_it = saml2idp_metadata.SAML2IDP_CONFIG['signing']
self._response_xml = xml_render.get_response_xml(self._response_params, self._assertion_xml, signed=sign_it)
def _get_django_response_params(self):
"""
Returns a dictionary of parameters for the response template.
"""
tv = {
'acs_url': self._request_params['ACS_URL'],
'saml_response': force_text(self._saml_response),
'relay_state': self._relay_state,
'autosubmit': saml2idp_metadata.SAML2IDP_CONFIG['autosubmit'],
}
return tv
def _parse_request(self):
"""
Parses various parameters from _request_xml into _request_params.
"""
self._request.parse_request()
if self._request.signed:
self._request.parse_signed() # TODO: get cert from metadata
params = dict()
params['ACS_URL'] = self._request.acs_url
params['REQUEST_ID'] = self._request.request_id
params['DESTINATION'] = self._request.destination
params['PROVIDER_NAME'] = self._request.provider_name
self._request_params = params
def _reset(self, django_request, sp_config=None):
"""
Initialize (and reset) object properties, so we don't risk carrying
over anything from the last authentication.
If provided, use sp_config throughout; otherwise, it will be set in
_validate_request().
"""
self._assertion_params = None
self._assertion_xml = None
self._django_request = django_request
self._relay_state = None
self._request = None
self._request_id = None
self._request_xml = None
self._request_params = None
self._response_id = None
self._saml_request = None
self._saml_response = None
self._subject = None
self._subject_format = 'urn:oasis:names:tc:SAML:2.0:nameid-format:email'
self._system_params = {
'ISSUER': saml2idp_metadata.SAML2IDP_CONFIG['issuer'],
}
def _validate_request(self):
"""
Validates the SAML request against the SP configuration of this
processor. Sub-classes should override this and raise a
`CannotHandleAssertion` exception if the validation fails.
Raises:
CannotHandleAssertion: if the ACS URL specified in the SAML request
doesn't match the one specified in the processor config.
"""
request_acs_url = self._request_params['ACS_URL']
if self._config['acs_url'] != request_acs_url:
msg = ("couldn't find ACS url '{}' in SAML2IDP_REMOTES "
"setting.".format(request_acs_url))
self._logger.info(msg)
raise exceptions.CannotHandleAssertion(msg)
def _validate_user(self):
"""
Validates the User. Sub-classes should override this and
        raise a CannotHandleAssertion exception if the validation does not succeed.
"""
pass
def can_handle(self, django_request):
"""
Returns true if this processor can handle this request.
"""
self._reset(django_request)
# Read the request.
try:
self._extract_saml_request()
except Exception as exc:
msg = "can't find SAML request in user session: %s" % exc
self._logger.info(msg)
raise exceptions.CannotHandleAssertion(msg)
self._request = RequestProcessor(self._saml_request)
try:
self._parse_request()
except Exception as exc:
msg = "can't parse SAML request: %s" % exc
self._logger.info(msg)
raise exceptions.CannotHandleAssertion(msg)
self._validate_request()
return True
def generate_response(self):
"""
Processes request and returns template variables suitable for a response.
"""
# Build the assertion and response.
self._validate_user()
self._build_assertion()
self._format_assertion()
self._build_response()
self._format_response()
self._encode_response()
# Return proper template params.
return self._get_django_response_params()
def init_deep_link(self, request, sp_config, url):
"""
Initialize this Processor to make an IdP-initiated call to the SP's
deep-linked URL.
"""
self._reset(request, sp_config)
acs_url = self._config['acs_url']
# NOTE: The following request params are made up. Some are blank,
        # because they normally come over in the AuthnRequest, but we don't have an
# AuthnRequest in this case:
# - Destination: Should be this IdP's SSO endpoint URL. Not used in the response?
# - ProviderName: According to the spec, this is optional.
self._request_params = {
'ACS_URL': acs_url,
'DESTINATION': '',
'PROVIDER_NAME': '',
}
self._relay_state = url
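# Sketch of a Service Provider-specific subclass (illustrative only; the class name
# and method bodies are assumptions -- a real subclass builds the assertion XML with
# whichever xml_render helper matches its template):
#
#   class MyServiceProcessor(Processor):
#       def _format_assertion(self):
#           # populate self._assertion_xml from self._assertion_params here
#           ...
#
#       def _validate_user(self):
#           if not self._django_request.user.is_authenticated:
#               raise exceptions.CannotHandleAssertion('user not authenticated')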
|
the-stack_106_25742 | '''OpenGL extension ARB.vertex_buffer_object
Overview (from the spec)
This extension defines an interface that allows various types of data
(especially vertex array data) to be cached in high-performance
graphics memory on the server, thereby increasing the rate of data
transfers.
Chunks of data are encapsulated within "buffer objects", which
conceptually are nothing more than arrays of bytes, just like any
chunk of memory. An API is provided whereby applications can read
from or write to buffers, either via the GL itself (glBufferData,
glBufferSubData, glGetBufferSubData) or via a pointer to the memory.
The latter technique is known as "mapping" a buffer. When an
application maps a buffer, it is given a pointer to the memory. When
the application finishes reading from or writing to the memory, it is
required to "unmap" the buffer before it is once again permitted to
use that buffer as a GL data source or sink. Mapping often allows
applications to eliminate an extra data copy otherwise required to
access the buffer, thereby enhancing performance. In addition,
requiring that applications unmap the buffer to use it as a data
source or sink ensures that certain classes of latent synchronization
bugs cannot occur.
Although this extension only defines hooks for buffer objects to be
used with OpenGL's vertex array APIs, the API defined in this
extension permits buffer objects to be used as either data sources or
sinks for any GL command that takes a pointer as an argument.
Normally, in the absence of this extension, a pointer passed into the
GL is simply a pointer to the user's data. This extension defines
a mechanism whereby this pointer is used not as a pointer to the data
itself, but as an offset into a currently bound buffer object. The
buffer object ID zero is reserved, and when buffer object zero is
bound to a given target, the commands affected by that buffer binding
behave normally. When a nonzero buffer ID is bound, then the pointer
represents an offset.
In the case of vertex arrays, this extension defines not merely one
binding for all attributes, but a separate binding for each
individual attribute. As a result, applications can source their
attributes from multiple buffers. An application might, for example,
have a model with constant texture coordinates and variable geometry.
The texture coordinates might be retrieved from a buffer object with
the usage mode "STATIC_DRAW", indicating to the GL that the
application does not expect to update the contents of the buffer
frequently or even at all, while the vertices might be retrieved from
a buffer object with the usage mode "STREAM_DRAW", indicating that
the vertices will be updated on a regular basis.
In addition, a binding is defined by which applications can source
index data (as used by DrawElements, DrawRangeElements, and
MultiDrawElements) from a buffer object. On some platforms, this
enables very large models to be rendered with no more than a few
small commands to the graphics device.
It is expected that a future extension will allow sourcing pixel data
from and writing pixel data to a buffer object.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/ARB/vertex_buffer_object.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_vertex_buffer_object'
GL_BUFFER_SIZE_ARB = constant.Constant( 'GL_BUFFER_SIZE_ARB', 0x8764 )
GL_BUFFER_USAGE_ARB = constant.Constant( 'GL_BUFFER_USAGE_ARB', 0x8765 )
GL_ARRAY_BUFFER_ARB = constant.Constant( 'GL_ARRAY_BUFFER_ARB', 0x8892 )
GL_ELEMENT_ARRAY_BUFFER_ARB = constant.Constant( 'GL_ELEMENT_ARRAY_BUFFER_ARB', 0x8893 )
GL_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_ARRAY_BUFFER_BINDING_ARB', 0x8894 )
glget.addGLGetConstant( GL_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB', 0x8895 )
glget.addGLGetConstant( GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_VERTEX_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_VERTEX_ARRAY_BUFFER_BINDING_ARB', 0x8896 )
glget.addGLGetConstant( GL_VERTEX_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_NORMAL_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_NORMAL_ARRAY_BUFFER_BINDING_ARB', 0x8897 )
glget.addGLGetConstant( GL_NORMAL_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_COLOR_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_COLOR_ARRAY_BUFFER_BINDING_ARB', 0x8898 )
glget.addGLGetConstant( GL_COLOR_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_INDEX_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_INDEX_ARRAY_BUFFER_BINDING_ARB', 0x8899 )
glget.addGLGetConstant( GL_INDEX_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB', 0x889A )
glget.addGLGetConstant( GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB', 0x889B )
glget.addGLGetConstant( GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB', 0x889C )
glget.addGLGetConstant( GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB', 0x889D )
glget.addGLGetConstant( GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB', 0x889E )
glget.addGLGetConstant( GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB, (1,) )
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB = constant.Constant( 'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB', 0x889F )
GL_READ_ONLY_ARB = constant.Constant( 'GL_READ_ONLY_ARB', 0x88B8 )
GL_WRITE_ONLY_ARB = constant.Constant( 'GL_WRITE_ONLY_ARB', 0x88B9 )
GL_READ_WRITE_ARB = constant.Constant( 'GL_READ_WRITE_ARB', 0x88BA )
GL_BUFFER_ACCESS_ARB = constant.Constant( 'GL_BUFFER_ACCESS_ARB', 0x88BB )
GL_BUFFER_MAPPED_ARB = constant.Constant( 'GL_BUFFER_MAPPED_ARB', 0x88BC )
GL_BUFFER_MAP_POINTER_ARB = constant.Constant( 'GL_BUFFER_MAP_POINTER_ARB', 0x88BD )
GL_STREAM_DRAW_ARB = constant.Constant( 'GL_STREAM_DRAW_ARB', 0x88E0 )
GL_STREAM_READ_ARB = constant.Constant( 'GL_STREAM_READ_ARB', 0x88E1 )
GL_STREAM_COPY_ARB = constant.Constant( 'GL_STREAM_COPY_ARB', 0x88E2 )
GL_STATIC_DRAW_ARB = constant.Constant( 'GL_STATIC_DRAW_ARB', 0x88E4 )
GL_STATIC_READ_ARB = constant.Constant( 'GL_STATIC_READ_ARB', 0x88E5 )
GL_STATIC_COPY_ARB = constant.Constant( 'GL_STATIC_COPY_ARB', 0x88E6 )
GL_DYNAMIC_DRAW_ARB = constant.Constant( 'GL_DYNAMIC_DRAW_ARB', 0x88E8 )
GL_DYNAMIC_READ_ARB = constant.Constant( 'GL_DYNAMIC_READ_ARB', 0x88E9 )
GL_DYNAMIC_COPY_ARB = constant.Constant( 'GL_DYNAMIC_COPY_ARB', 0x88EA )
glBindBufferARB = platform.createExtensionFunction(
'glBindBufferARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint,),
doc = 'glBindBufferARB( GLenum(target), GLuint(buffer) ) -> None',
argNames = ('target', 'buffer',),
)
glDeleteBuffersARB = platform.createExtensionFunction(
'glDeleteBuffersARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei, arrays.GLuintArray,),
doc = 'glDeleteBuffersARB( GLsizei(n), GLuintArray(buffers) ) -> None',
argNames = ('n', 'buffers',),
)
glGenBuffersARB = platform.createExtensionFunction(
'glGenBuffersARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei, arrays.GLuintArray,),
doc = 'glGenBuffersARB( GLsizei(n), GLuintArray(buffers) ) -> None',
argNames = ('n', 'buffers',),
)
glIsBufferARB = platform.createExtensionFunction(
'glIsBufferARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=constants.GLboolean,
argTypes=(constants.GLuint,),
doc = 'glIsBufferARB( GLuint(buffer) ) -> constants.GLboolean',
argNames = ('buffer',),
)
glBufferDataARB = platform.createExtensionFunction(
'glBufferDataARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLsizeiptrARB, ctypes.c_void_p, constants.GLenum,),
doc = 'glBufferDataARB( GLenum(target), GLsizeiptrARB(size), c_void_p(data), GLenum(usage) ) -> None',
argNames = ('target', 'size', 'data', 'usage',),
)
glBufferSubDataARB = platform.createExtensionFunction(
'glBufferSubDataARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLintptrARB, constants.GLsizeiptrARB, ctypes.c_void_p,),
doc = 'glBufferSubDataARB( GLenum(target), GLintptrARB(offset), GLsizeiptrARB(size), c_void_p(data) ) -> None',
argNames = ('target', 'offset', 'size', 'data',),
)
glGetBufferSubDataARB = platform.createExtensionFunction(
'glGetBufferSubDataARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLintptrARB, constants.GLsizeiptrARB, ctypes.c_void_p,),
doc = 'glGetBufferSubDataARB( GLenum(target), GLintptrARB(offset), GLsizeiptrARB(size), c_void_p(data) ) -> None',
argNames = ('target', 'offset', 'size', 'data',),
)
glMapBufferARB = platform.createExtensionFunction(
'glMapBufferARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=ctypes.c_void_p,
argTypes=(constants.GLenum, constants.GLenum,),
doc = 'glMapBufferARB( GLenum(target), GLenum(access) ) -> ctypes.c_void_p',
argNames = ('target', 'access',),
)
glUnmapBufferARB = platform.createExtensionFunction(
'glUnmapBufferARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=constants.GLboolean,
argTypes=(constants.GLenum,),
doc = 'glUnmapBufferARB( GLenum(target) ) -> constants.GLboolean',
argNames = ('target',),
)
glGetBufferParameterivARB = platform.createExtensionFunction(
'glGetBufferParameterivARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glGetBufferParameterivARB( GLenum(target), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glGetBufferPointervARB = platform.createExtensionFunction(
'glGetBufferPointervARB', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, ctypes.POINTER(ctypes.c_void_p),),
doc = 'glGetBufferPointervARB( GLenum(target), GLenum(pname), POINTER(ctypes.c_void_p)(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
def glInitVertexBufferObjectARB():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
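# Rough usage sketch of the buffer-object flow described in the overview above
# (illustrative only; assumes a current GL context and a numpy array `vertex_data`,
# and glosses over how the buffer id array is allocated):
#
#   if glInitVertexBufferObjectARB():
#       buf = glGenBuffersARB(1)                     # or pass a pre-allocated id array
#       glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf)
#       glBufferDataARB(GL_ARRAY_BUFFER_ARB, vertex_data.nbytes,
#                       vertex_data, GL_STATIC_DRAW_ARB)
#       # ... set vertex pointers and draw, sourcing attributes from the bound buffer ...
#       glDeleteBuffersARB(1, [buf])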
|
the-stack_106_25743 | import unittest
import nn_grad_test as nt
import numpy as np
import start.neural_network as nn
import start.layer_dict as ld
import start.weight_update_params as wup
class TestTrainSigo2(unittest.TestCase):
def test(self):
net = nn.NeuralNetwork("test_net", 1)
layer = ld.hdict["fc"](10)
net.add_layer(layer)
layer = ld.hdict["fc"](1)
net.add_layer(layer)
layer = ld.odict["sigmoid"](1)
net.add_layer(layer)
net.set_l2_loss_coeff(.001)
np.random.seed(1)
params = wup.GradientDescentParams(.2)
net.set_weight_update_function(params)
net.initialize_parameters()
        a = 0.8
for i in range(10000):
x = (np.random.rand(1,32)*0.1 + 0.75)
y = x > a
net.train(x,y)
x = 0.79
# print(net.predict(x))
self.assertTrue(net.predict(x) < 0.5)
x = 0.81
# print(net.predict(x))
self.assertTrue(net.predict(x) > 0.5)
|
the-stack_106_25745 | import os
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
from torch_mimicry.nets.basemodel.basemodel import BaseModel
class ExampleModel(BaseModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.linear = nn.Linear(1, 4)
nn.init.xavier_uniform_(self.linear.weight.data)
def forward(self, x):
return
class TestBaseModel:
def setup(self):
self.model = ExampleModel()
self.opt = optim.Adam(self.model.parameters(), 2e-4, betas=(0.0, 0.9))
self.global_step = 0
self.log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_log")
def test_save_and_restore_checkpoint(self):
ckpt_dir = os.path.join(self.log_dir, 'checkpoints/model')
ckpt_file = os.path.join(ckpt_dir,
"model_{}_steps.pth".format(self.global_step))
self.model.save_checkpoint(directory=ckpt_dir,
optimizer=self.opt,
global_step=self.global_step)
restored_model = ExampleModel()
restored_opt = optim.Adam(self.model.parameters(),
2e-4,
betas=(0.0, 0.9))
restored_model.restore_checkpoint(ckpt_file=ckpt_file,
optimizer=self.opt)
# Check weights are preserved
assert all(
(restored_model.linear.weight == self.model.linear.weight) == 1)
# Check optimizers have same state dict
assert self.opt.state_dict() == restored_opt.state_dict()
def test_count_params(self):
num_total_params, num_trainable_params = self.model.count_params()
assert num_trainable_params == num_total_params == 8
def test_get_device(self):
assert type(self.model.device) == torch.device
def teardown(self):
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
del self.model
del self.opt
if __name__ == "__main__":
test = TestBaseModel()
test.setup()
test.test_save_and_restore_checkpoint()
test.test_count_params()
test.test_get_device()
test.teardown()
|
the-stack_106_25746 | import os
from distutils.dir_util import copy_tree
import time
import pytest
from nixui.graphics import main_window
from nixui import state_model
from nixui.options.option_tree import OptionTree
from nixui.options.attribute import Attribute
SAMPLES_PATH = 'tests/sample'
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
class Helpers:
class timeout(object):
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
@pytest.fixture
def helpers():
return Helpers
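# Usage sketch for the timeout helper inside a test (illustrative):
#
#   def test_something(helpers):
#       with helpers.timeout(5) as t:
#           while not t.timed_out:
#               ...  # poll until the condition holds or 5 s have elapsed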
@pytest.fixture
def samples_path(tmpdir):
copy_tree(SAMPLES_PATH, str(tmpdir))
return tmpdir
@pytest.fixture
def statemodel(samples_path):
os.environ['CONFIGURATION_PATH'] = os.path.abspath(os.path.join(samples_path, 'configuration.nix'))
return state_model.StateModel()
@pytest.fixture
def nix_gui_main_window(statemodel, qtbot):
nix_gui_mw = main_window.NixGuiMainWindow(statemodel)
yield nix_gui_mw
nix_gui_mw.close()
@pytest.fixture
def option_tree():
os.environ['CONFIGURATION_PATH'] = os.path.abspath(os.path.join(SAMPLES_PATH, 'configuration.nix'))
statemodel = state_model.StateModel()
return statemodel.option_tree
@pytest.fixture
def minimal_option_tree():
return OptionTree(
{
Attribute('myList'): {'type_string': 'list of strings'},
Attribute('myAttrs'): {'type_string': 'attribute set of submodules'},
Attribute('myAttrs."<name>"'): {},
},
{}
)
@pytest.fixture
def minimal_state_model(mocker, minimal_option_tree):
mocker.patch('nixui.state_model.api.get_option_tree', return_value=minimal_option_tree)
return state_model.StateModel()
|
the-stack_106_25747 | # Author: Gustavo Solcia
# E-mail: [email protected]
"""3D reconstruction using a segmentation image. We apply Marching Cubes algorithm and a
surface smoothing from the largest connected region. LOOK AT YOUR DATA: for some cases
the surface smoothing can shrink parts of your surface.
"""
import os
import vtk
import SimpleITK as sitk
def applyMarchingCubes(image, threshold):
"""Wrapper function to apply vtk marching cubes algorithm.
Parameters
----------
image: vtkImage
vtkImage from vtkNIFTIImageReader
threshold: float
threshold for binarization purposes
Returns
-------
largestRegion: vtkPolyData
vtk data object that represents a geometric structure with vertices, lines, polygons...
"""
contourNumber = 0
marchingCubes = vtk.vtkMarchingCubes()
marchingCubes.SetInputData(image)
marchingCubes.ComputeNormalsOn()
marchingCubes.ComputeGradientsOn()
marchingCubes.SetValue(contourNumber, threshold)
marchingCubes.Update()
mcPoly = marchingCubes.GetOutput()
largestRegion = getLargestRegion(mcPoly)
return largestRegion
def getLargestRegion(poly):
"""Function to get largest connected region from vtk poly data.
Parameters
----------
poly: vtkPolyData
vtk data object that represents a geometric structure with vertices, lines, polygons...
Returns
-------
largestRegion: vtkPolyData
largest connected region from poly input
"""
connectivityFilter = vtk.vtkPolyDataConnectivityFilter()
connectivityFilter.SetInputData(poly)
connectivityFilter.SetExtractionModeToLargestRegion()
connectivityFilter.Update()
largestRegion = connectivityFilter.GetOutput()
return largestRegion
def applyPolyFilter(poly):
"""Function that apply a surface vtk poly data filter.
Parameters
----------
poly: vtkPolyData
vtk data object that represents a geometric structure with vertices, lines, polygons...
Returns
-------
smoothPoly: vtkPolyData
smooth surface from poly input
"""
#These are parameters that worked fine for most of my applications.
#However, if you are having shrinking problems:
#-First, I would consider a higher passBand (e. g., 0.3, 0.4, 0.5, etc...).
#-Second, with a different passBand, I would increase the numberOfIterations
#and gradually decrease that number (but never going less than 100 iterations).
numberOfIterations = 100
passBand = 0.25
featureAngle = 120.0
polyFilter = vtk.vtkWindowedSincPolyDataFilter()
polyFilter.SetInputData(poly)
polyFilter.SetNumberOfIterations(numberOfIterations)
polyFilter.SetPassBand(passBand)
polyFilter.SetFeatureAngle(featureAngle)
polyFilter.Update()
smoothPoly = polyFilter.GetOutput()
return smoothPoly
def readImage(path, name):
"""vtkNIFTI image reader wrapper function.
Parameters
----------
path: string
String containing a path to the data directory
name: string
String containing the data or sample name
Returns
-------
image: vtkNIFTIImage
Desired image from path+name
"""
reader = vtk.vtkNIFTIImageReader()
reader.SetFileName(path+name)
reader.Update()
image = reader.GetOutput()
return image
def writeSTL(path, name, poly):
"""STL writting function for vtkPolyData.
Parameters
----------
path: string
String containing a path to the data directory
name: string
String containing the data or sample name
poly: vtkPolyData
vtk data object that represents a geometric structure with vertices, lines, polygons...
"""
writer = vtk.vtkSTLWriter()
writer.SetInputData(poly)
writer.SetFileName(path+name)
writer.Update()
if __name__=='__main__':
sample = '3C'
inputPath = os.path.abspath('/home/solcia/Documents/phd/MRI data/rocks/'+sample+'/PSIF300/')
outputPath = os.path.abspath('/home/solcia/Documents/phd/3DModels/rocks/'+sample+'/')
inputName = '/Atropos_'+sample+'.nii.gz'
cubesOutputName = '/cubes_'+sample+'.stl'
smoothOutputName = '/smooth_'+sample+'.stl'
threshold = 2.5
vtkImage = readImage(inputPath, inputName)
mcPoly = applyMarchingCubes(vtkImage, threshold)
polyFiltered = applyPolyFilter(mcPoly)
writeSTL(outputPath,cubesOutputName, mcPoly)
writeSTL(outputPath,smoothOutputName, polyFiltered)
|
the-stack_106_25750 | # -*- coding: utf-8 -*-
u"""run test files in separate processes
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
def default_command(*args):
"""Run tests one at a time with py.test.
Searches in ``tests`` sub-directory if not provided a
list of tests.
Arguments are directories or files, which are searched for _test.py
files.
An argument which is ``case=<pattern>``, is passed to pytest
as ``-k <pattern>``.
Writes failures to ``<base>_test.log``
Args:
args (str): test dirs, files, options
Returns:
str: passed=N if all passed, else raises `pkcli.Error`
"""
from pykern import pkcli
from pykern import pkconfig
from pykern import pksubprocess
from pykern import pkio
from pykern import pkunit
import os
import sys
cfg = pkconfig.init(
max_failures=(5, int, 'maximum number of test failures before exit'),
)
e = PKDict(os.environ)
n = 0
f = []
c = []
paths, flags = _args(args)
for t in paths:
n += 1
o = t.replace('.py', '.log')
m = 'pass'
try:
sys.stdout.write(t)
sys.stdout.flush()
pksubprocess.check_call_with_signals(
['py.test', '--tb=native', '-v', '-s', '-rs', t] + flags,
output=o,
env=PKDict(
os.environ,
).pkupdate({pkunit.TEST_FILE_ENV: t}),
#TODO(robnagler) not necessary
# recursive_kill=True,
)
except Exception as e:
if isinstance(e, RuntimeError) and 'exit(5)' in e.args[0]:
# 5 means test was skipped
# see http://doc.pytest.org/en/latest/usage.html#possible-exit-codes
m = 'skipped'
else:
m = 'FAIL {}'.format(o)
f.append(o)
sys.stdout.write(' ' + m + '\n')
if len(f) >= cfg.max_failures:
sys.stdout.write('too many failures={} aborting\n'.format(len(f)))
break
if n == 0:
pkcli.command_error('no tests found')
if len(f) > 0:
# Avoid dumping too many test logs
for o in f[:5]:
sys.stdout.write(pkio.read_text(o))
sys.stdout.flush()
pkcli.command_error('FAILED={} passed={}'.format(len(f), n - len(f)))
return 'passed={}'.format(n)
def _args(tests):
    from pykern import pkcli
    paths = []
flags = []
for t in tests:
if '=' in t:
a, b = t.split('=')
if a == 'case':
flags.extend(('-k', b))
else:
pkcli.command_error('unsupported option={}'.format(t))
else:
paths.append(t)
return _find(paths), flags
def _find(paths):
from pykern import pkio
import re
i = re.compile(r'(?:_work|_data)/')
res = []
cwd = pkio.py_path()
for t in paths or ('tests',):
t = pkio.py_path(t)
if t.check(file=True):
res.append(str(cwd.bestrelpath(t)))
continue
for p in pkio.walk_tree(t, re.compile(r'_test\.py$')):
p = str(cwd.bestrelpath(p))
if not i.search(p):
res.append(p)
return res
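# Example invocations (illustrative; the exact command name depends on how this
# pkcli module is registered in the project):
#
#   pykern test                                   # run every tests/**/*_test.py
#   pykern test tests/foo_test.py case=serialization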
|
the-stack_106_25751 | import numpy as np
import torch
from torch.nn import functional as F
from nflows.transforms.base import InputOutsideDomain
from nflows.utils import torchutils
from nflows.transforms.standard import PointwiseAffineTransform
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
def unconstrained_rational_quadratic_spline(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tails="linear",
tail_bound=1.0,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE,
):
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
outside_interval_mask = ~inside_interval_mask
outputs = torch.zeros_like(inputs)
logabsdet = torch.zeros_like(inputs)
if tails == "linear":
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
constant = np.log(np.exp(1 - min_derivative) - 1)
unnormalized_derivatives[..., 0] = constant
unnormalized_derivatives[..., -1] = constant
outputs[outside_interval_mask] = inputs[outside_interval_mask]
logabsdet[outside_interval_mask] = 0
else:
raise RuntimeError("{} tails are not implemented.".format(tails))
if torch.any(inside_interval_mask):
(
outputs[inside_interval_mask],
logabsdet[inside_interval_mask],
) = rational_quadratic_spline(
inputs=inputs[inside_interval_mask],
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
inverse=inverse,
left=-tail_bound,
right=tail_bound,
bottom=-tail_bound,
top=tail_bound,
min_bin_width=min_bin_width,
min_bin_height=min_bin_height,
min_derivative=min_derivative,
)
return outputs, logabsdet
def rational_quadratic_spline(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tail_bound=None,
left=0.0,
right=1.0,
bottom=0.0,
top=1.0,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE,
):
log_det_contr = [0]
if tail_bound:
shift_op = PointwiseAffineTransform(1/2, 1/(2 * tail_bound))
if not inverse:
inputs, log_det_contr = shift_op.forward(inputs)
if torch.min(inputs) < left or torch.max(inputs) > right:
raise InputOutsideDomain()
num_bins = unnormalized_widths.shape[-1]
if min_bin_width * num_bins > 1.0:
raise ValueError("Minimal bin width too large for the number of bins")
if min_bin_height * num_bins > 1.0:
raise ValueError("Minimal bin height too large for the number of bins")
widths = F.softmax(unnormalized_widths, dim=-1)
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
cumwidths = torch.cumsum(widths, dim=-1)
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
cumwidths = (right - left) * cumwidths + left
cumwidths[..., 0] = left
cumwidths[..., -1] = right
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
heights = F.softmax(unnormalized_heights, dim=-1)
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
cumheights = torch.cumsum(heights, dim=-1)
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
cumheights = (top - bottom) * cumheights + bottom
cumheights[..., 0] = bottom
cumheights[..., -1] = top
heights = cumheights[..., 1:] - cumheights[..., :-1]
if inverse:
bin_idx = torchutils.searchsorted(cumheights, inputs)[..., None]
else:
bin_idx = torchutils.searchsorted(cumwidths, inputs)[..., None]
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
delta = heights / widths
input_delta = delta.gather(-1, bin_idx)[..., 0]
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
input_heights = heights.gather(-1, bin_idx)[..., 0]
if inverse:
a = (inputs - input_cumheights) * (
input_derivatives + input_derivatives_plus_one - 2 * input_delta
) + input_heights * (input_delta - input_derivatives)
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
input_derivatives + input_derivatives_plus_one - 2 * input_delta
)
c = -input_delta * (inputs - input_cumheights)
discriminant = b.pow(2) - 4 * a * c
assert (discriminant >= 0).all()
root = (2 * c) / (-b - torch.sqrt(discriminant))
# root = (- b + torch.sqrt(discriminant)) / (2 * a)
outputs = root * input_bin_widths + input_cumwidths
theta_one_minus_theta = root * (1 - root)
denominator = input_delta + (
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
* theta_one_minus_theta
)
derivative_numerator = input_delta.pow(2) * (
input_derivatives_plus_one * root.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - root).pow(2)
)
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
if tail_bound:
outputs, log_det_contr = shift_op.inverse(outputs)
return outputs, -logabsdet + log_det_contr[0]
else:
theta = (inputs - input_cumwidths) / input_bin_widths
theta_one_minus_theta = theta * (1 - theta)
numerator = input_heights * (
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
)
denominator = input_delta + (
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
* theta_one_minus_theta
)
outputs = input_cumheights + numerator / denominator
derivative_numerator = input_delta.pow(2) * (
input_derivatives_plus_one * theta.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - theta).pow(2)
)
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
return outputs, logabsdet + log_det_contr[0]
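# Shape sketch (illustrative): for `inputs` of shape (N,) and K bins, a conditioner
# typically supplies `unnormalized_widths` and `unnormalized_heights` of shape (N, K)
# and `unnormalized_derivatives` of shape (N, K + 1) -- or (N, K - 1) for the
# unconstrained/linear-tails variant, which pads the two boundary derivatives itself.
# Both functions return `(outputs, logabsdet)` with the same shape as `inputs`.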
|
the-stack_106_25756 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mollie.ideal.helpers import _get_mollie_xml, get_mollie_bank_choices
class MollieIdealPayment(models.Model):
transaction_id = models.CharField(_('Transaction ID'), max_length=255)
amount = models.DecimalField(_('Amount'), max_digits=64, decimal_places=2)
bank_id = models.CharField(_('Bank ID'), max_length=4,
choices=get_mollie_bank_choices(show_all_banks=True),
default = '')
description = models.CharField(_('Description'), max_length=29)
timestamp = models.DateTimeField(_('Timestamp'), auto_now_add=True)
consumer_account = models.CharField(_('Consumer account'), max_length=255, blank=True)
consumer_name = models.CharField(_('Consumer name'), max_length=255, blank=True)
consumer_city = models.CharField(_('Consumer city'), max_length=255, blank=True)
class Meta:
abstract = True
verbose_name = _('Mollie/iDEAL payment')
def get_order_url(self):
'Sets up a payment with Mollie.nl and returns an order URL.'
if settings.MOLLIE_REVERSE_URLS:
reporturl = settings.MOLLIE_IMPLEMENTING_SITE_URL+reverse(settings.MOLLIE_REPORT_URL)
returnurl = settings.MOLLIE_IMPLEMENTING_SITE_URL+reverse(settings.MOLLIE_RETURN_URL)
else:
reporturl = settings.MOLLIE_REPORT_URL
returnurl = settings.MOLLIE_RETURN_URL
request_dict = dict(
a = 'fetch',
amount = int(self.amount * 100),
bank_id = self.bank_id,
description = self.description,
partnerid = settings.MOLLIE_PARTNER_ID,
reporturl = reporturl,
returnurl = returnurl
)
if settings.MOLLIE_PROFILE_KEY:
request_dict.update(dict(
profile_key=settings.MOLLIE_PROFILE_KEY
))
parsed_xml = _get_mollie_xml(request_dict)
order = parsed_xml.find('order')
order_url = order.findtext('URL')
self.transaction_id = order.findtext('transaction_id')
self.save()
return order_url
fetch = get_order_url
def is_paid(self):
'Checks whether a payment has been made successfully.'
request_dict = dict(
a = 'check',
partnerid = settings.MOLLIE_PARTNER_ID,
transaction_id = self.transaction_id
)
parsed_xml = _get_mollie_xml(request_dict)
order = parsed_xml.find('order')
consumer = order.find('consumer')
if consumer:
self.consumer_account = consumer.findtext('consumerAccount')
self.consumer_city = consumer.findtext('consumerCity')
self.consumer_name = consumer.findtext('consumerName')
if order.findtext('payed') == 'true':
return True
return False
check = is_paid
@property
def bank_name(self):
return self.get_bank_id_display()
def __unicode__(self):
return u'Mollie/iDEAL Payment ID: %d' % self.id
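# Usage sketch (illustrative; `Payment` is an assumed concrete subclass of this
# abstract model and the bank id is a placeholder):
#
#   payment = Payment.objects.create(amount=Decimal('10.00'), bank_id='0021',
#                                    description='Example order')
#   redirect_url = payment.get_order_url()   # redirect the customer to the bank
#   ...
#   if payment.is_paid():
#       ...  # fulfil the order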
|
the-stack_106_25757 | from pptx import Presentation
from pptx.util import Mm, Pt
from pptx.dml.color import RGBColor
from pptx.enum.shapes import MSO_SHAPE, MSO_CONNECTOR
from pptx.enum.text import PP_ALIGN
import importlib.resources as pkg_resources
from concurrent.futures import ThreadPoolExecutor, Future
from threading import Lock
from .backend import *
class PptxBackend(Backend):
''' A very basic .pptx generator that only roughly matches results of other backends.
in PPTX format we have the following limitations due to our dependency python-pptx:
- ignore background colors
- ignore vertical alignment and padding of titles
- do not support 'dashed' frames - if a frame is 'dashed' the frame in pptx will be normal (but still has a frame)
- only support text rotation by 0° and +-90°
'''
def __init__(self):
self._thread_pool = ThreadPoolExecutor()
self._slide_mutex = Lock()
def assemble_grid(self, components: List[Component], output_dir: str):
return components
def combine_grids(self, data: List[List[Component]], idx: int, bounds: Bounds) -> List[Component]:
flat = []
for row in data:
flat.extend(row)
return flat
def _add_image(self, c: Component, slide):
# Write image to temp folder
with tempfile.TemporaryDirectory() as tmpdir:
fname = c.data.make_raster(c.bounds.width, c.bounds.height, os.path.join(tmpdir, "image"))
self._slide_mutex.acquire()
shape = slide.shapes.add_picture(fname, Mm(c.bounds.left), Mm(c.bounds.top),
width=Mm(c.bounds.width))
shape.shadow.inherit = False
self._slide_mutex.release()
if c.has_frame:
self._slide_mutex.acquire()
offset = Pt(c.frame_linewidth) / 2
shape = slide.shapes.add_shape(MSO_SHAPE.RECTANGLE, Mm(c.bounds.left) + offset,
Mm(c.bounds.top) + offset, Mm(c.bounds.width) - Pt(c.frame_linewidth),
Mm(c.bounds.height) - Pt(c.frame_linewidth))
shape.shadow.inherit = False
shape.line.color.rgb = RGBColor(c.frame_color[0], c.frame_color[1], c.frame_color[2])
shape.line.width = Pt(c.frame_linewidth)
# shape.line.join_type = 'Miter' # Removes rounded edges, but is not supported, yet (sadly)
shape.fill.background()
self._slide_mutex.release()
def combine_rows(self, data: List[Component], bounds: Bounds):
# We load a template from a file to have some nicer line styles etc by default
# (they cannot currently be specified via python-pptx)
with tempfile.TemporaryDirectory() as tmpdir:
themedata = pkg_resources.read_binary(__package__, "theme.pptx")
p = os.path.join(tmpdir, "theme.pptx")
with open(p, "wb") as f:
f.write(themedata)
prs = Presentation(p)
# Create a single slide presentation with a blank slide
# prs = Presentation()
prs.slide_height = Mm(bounds.height)
prs.slide_width = Mm(bounds.width)
blank_slide_layout = prs.slide_layouts[6]
slide = prs.slides.add_slide(blank_slide_layout)
# Add all our elements to the slide
flat = []
for row in data:
flat.extend(row)
# Generate all images in parallel
futures = []
for c in flat:
if isinstance(c, ImageComponent):
futures.append(self._thread_pool.submit(self._add_image, c, slide))
for f in futures:
f.result()
# Add everything else afterwards, to ensure proper z-order
for c in flat:
if isinstance(c, TextComponent):
if c.rotation == 90.0 or c.rotation == -90.0:
# The shape is rotated about its center. We want a rotation about the top left corner instead.
# Since we only allow 90° rotations, we can correct for that with a simple translation
pos_top = c.bounds.top + c.bounds.height / 2. - c.bounds.width / 2.
pos_left = c.bounds.left - c.bounds.height / 2. + c.bounds.width / 2.
# swap height and width
height, width = c.bounds.width, c.bounds.height
shape = slide.shapes.add_shape(MSO_SHAPE.RECTANGLE, Mm(pos_left), Mm(pos_top),
Mm(width), Mm(height))
# tikz rotation is counter-clockwise, pptx clockwise (we switch in pptx)
shape.rotation = -c.rotation
else:
shape = slide.shapes.add_shape(MSO_SHAPE.RECTANGLE, Mm(c.bounds.left), Mm(c.bounds.top),
Mm(c.bounds.width), Mm(c.bounds.height))
shape.shadow.inherit = False
# Background color
if c.background_color is not None:
shape.fill.solid()
shape.fill.fore_color.rgb = RGBColor(c.background_color[0], c.background_color[1],
c.background_color[2])
else:
shape.fill.background()
shape.line.fill.background()
# Text properties
text_frame = shape.text_frame
p = text_frame.paragraphs[0]
p.alignment = {
"center": PP_ALIGN.CENTER, "left": PP_ALIGN.LEFT, "right": PP_ALIGN.RIGHT
}[c.horizontal_alignment]
text_frame.margin_top = 0
text_frame.margin_bottom = 0
if c.horizontal_alignment == 'right':
text_frame.margin_right = Mm(c.padding.width_mm)
text_frame.margin_left = 0
else:
text_frame.margin_right = 0
text_frame.margin_left = Mm(c.padding.width_mm)
run = p.add_run()
run.text = c.content.replace("\\\\", "\n")
run.font.color.rgb = RGBColor(c.color[0], c.color[1], c.color[2])
run.font.size = Pt(c.fontsize)
if isinstance(c, RectangleComponent):
shape = slide.shapes.add_shape(MSO_SHAPE.RECTANGLE, Mm(c.bounds.left), Mm(c.bounds.top),
Mm(c.bounds.width), Mm(c.bounds.height))
shape.shadow.inherit = False
shape.line.color.rgb = RGBColor(c.color[0], c.color[1], c.color[2])
shape.line.width = Pt(c.linewidth)
# shape.line.join_type = 'Miter' # Removes rounded edges, but is not supported, yet (sadly)
shape.fill.background()
if isinstance(c, LineComponent):
shape = slide.shapes.add_connector(MSO_CONNECTOR.STRAIGHT, Mm(c.from_x), Mm(c.from_y),
Mm(c.to_x), Mm(c.to_y))
shape.shadow.inherit = False
shape.line.color.rgb = RGBColor(c.color[0], c.color[1], c.color[2])
shape.line.width = Pt(c.linewidth)
return prs
def write_to_file(self, data, filename: str):
data.save(filename) |
the-stack_106_25760 | from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="wplay",
version="5.0.3",
install_requires=["python-telegram-bot >= 11.1.0",
"datetime >= 4.3",
"playsound >= 1.2.2",
"argparse >= 1.4.0",
"beautifulsoup4 >= 4.8.1",
"pyppeteer >= 0.0.25",
"whaaaaat>=0.5.2",
"prompt_toolkit==1.0.14",
"pyfiglet>=0.8.post1",
"requests>=2.22.0",
"psutil>=5.7.0",
"flake8"
],
packages=find_packages(),
description="command line software to play with your WhatsApp",
long_description=long_description,
long_description_content_type="text/markdown",
author="Rohit Potter, Alexandre Calil",
author_email="[email protected], [email protected]",
license="MIT",
python_requires=">=3.6",
url="https://github.com/rpotter12/whatsapp-play/",
download_url="https://pypi.org/project/wplay/",
keywords=[
"whatsapp",
"whatsapp-cli",
"whatsapp-chat",
"message-blast",
"message-timer",
"tracker",
"online tracking",
"save-chat"
],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["wplay = wplay.__main__:main"]},
)
|
the-stack_106_25761 | from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import TypeVar, Union, Type, ForwardRef
from .addon import AddonBase
TAddon = TypeVar('TAddon')
del TYPE_CHECKING
import logging
logger = logging.getLogger('helper')
from .connector.ADBConnector import ADBConnector
from .frontend import Frontend, DummyFrontend
from .mixin import AddonMixin
class BaseAutomator(AddonMixin):
frontend: Frontend
    def __init__(self, device_connector=None, frontend=None):  # the device currently bound to
        self.logger = logging.getLogger(type(self).__name__)
        if frontend is None:
            frontend = DummyFrontend()
        self.frontend = frontend
        self.frontend.attach(self)
        self.helper = self
        self.addons: dict[Union[str, Type[TAddon]], TAddon] = {}
        self._cli_commands = OrderedDict()
        self.load_addons()
        self._device = None
        if device_connector is not None:
            self.connect_device(device_connector)
        logger.debug("module initialized successfully")
def addon(self, cls: Union[ForwardRef[Type[TAddon]], Type[TAddon]]) -> TAddon:
from .addon import _addon_registry
dealias = _addon_registry[cls]
if dealias in self.addons:
return self.addons[dealias]
elif type(dealias) == type:
logger.debug("loading addon %s", dealias.__qualname__)
instance = dealias(self)
self.addons[dealias] = instance
return instance
else:
raise TypeError("cls")
def _ensure_device(self):
if self._device is None:
new_device = self.frontend.request_device_connector()
if new_device is None:
raise RuntimeError("no device connected")
self.connect_device(connector=new_device)
@property
def device(self):
self._ensure_device()
return self._device
@property
def viewport(self):
self._ensure_device()
return self._viewport
def connect_device(self, connector=None, *, adb_serial=None):
if connector is not None:
self._device = connector
elif adb_serial is not None:
self._device = ADBConnector(adb_serial)
else:
self._device = None
return
self._viewport: tuple[int, int] = self._device.screen_size
self.vw = self._viewport[0] / 100
self.vh = self._viewport[1] / 100
self.on_device_connected()
self.frontend.notify('current-device', str(self._device))
def on_device_connected(self):
pass
def load_addons(self):
pass
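# Typical addon access (illustrative; `MyAutomator` and `SomeAddon` are assumed
# subclasses of BaseAutomator/AddonBase picked up by the addon registry):
#
#   helper = MyAutomator(device_connector=ADBConnector('emulator-5554'))
#   helper.addon(SomeAddon).run()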
|
the-stack_106_25762 | """Opcodes printing
Takes .py files and yields opcodes (and their arguments) for ordinary python programs.
This file can also be imported as a module and contains the following
functions:
    * expand_bytecode - finds and recursively expands the bytecode result
    * bc_print - prints instruction names and a human-readable description of operation arguments
    * main - the main entry point of the script
"""
import marshal
import dis, sys
def expand_bytecode(bytecode):
"""
    Function that recursively expands the bytecode result, descending into nested code objects
:param bytecode: bytecode
:return: list with instructions
"""
result = []
for instruction in bytecode:
if str(type(instruction.argval)) == "<class 'code'>":
result += expand_bytecode(dis.Bytecode(instruction.argval))
else:
result.append(instruction)
return result
def bc_print():
"""
    Function that prints instruction names and a human-readable description of operation arguments
:return: None
"""
for i in sys.argv[2:]:
source = None
if sys.argv[1] == "-py":
with open(i, 'r') as f:
source = f.read()
elif sys.argv[1] == "-pyc":
header_size = 12
with open(i, 'rb') as f:
f.seek(header_size)
source = marshal.load(f)
elif sys.argv[1] == "-s":
source = i
else:
print("Error")
return
bc = dis.Bytecode(source)
res = expand_bytecode(bc)
for instruction in res:
print(f'{instruction.opname}\t {instruction.argrepr}')
print()
print()
if __name__ == "__main__":
# main execution logic for Tasks 2&3
bc_print()
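# Example invocations (script and file names are hypothetical):
#
#   python opcodes.py -py example.py other.py        # disassemble source files
#   python opcodes.py -pyc example.cpython-36.pyc    # disassemble compiled files
#   python opcodes.py -s "print('hi')"               # disassemble a source string
#
# Note: the 12-byte .pyc header offset used above matches Python 3.3-3.6; newer
# interpreters use a 16-byte header.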
|
the-stack_106_25763 | # Time: O(h)
# Space: O(h)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
if not root:
return root
if root.val > key:
            root.left = self.deleteNode(root.left, key)
        elif root.val < key:
            root.right = self.deleteNode(root.right, key)
else:
if not root.left:
right = root.right
del root
return right
elif not root.right:
left = root.left
del root
return left
else:
successor = root.right
while successor.left:
successor = successor.left
root.val = successor.val
                root.right = self.deleteNode(root.right, successor.val)
return root
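# Example (illustrative): deleting key 3 replaces it with its in-order successor 4,
# e.g. for the BST [5, 3, 6, 2, 4, None, 7]:
#
#   root = Solution().deleteNode(root, 3)   # subtree rooted at 3 becomes 4 -> (2, None)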
|
the-stack_106_25764 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from mock import MagicMock
from indico.modules.groups import GroupProxy
from indico.modules.networks.models.networks import IPNetworkGroup
from indico.modules.users import User
from indico.util.user import iter_acl
def test_iter_acl():
user = User()
user_p = MagicMock(principal=user, spec=['principal'])
ipn = IPNetworkGroup()
ipn_p = MagicMock(principal=ipn, spec=['principal'])
local_group = GroupProxy(123, _group=MagicMock())
local_group_p = MagicMock(principal=local_group, spec=['principal'])
remote_group = GroupProxy('foo', 'bar')
remote_group_p = MagicMock(principal=remote_group, spec=['principal'])
acl = [ipn, user_p, remote_group, local_group_p, user, local_group, remote_group_p, ipn_p]
assert list(iter_acl(iter(acl))) == [user_p, user,
ipn, ipn_p,
local_group_p, local_group,
remote_group, remote_group_p]
|
the-stack_106_25765 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import subprocess
import socket
import datetime
import os
import ctypes
import platform
NO_NGINX = 0
NO_NGINX_CONF = 1
NO_NGINX_LOG = 2
SUCCESS = 6
def check_memory(path, style='M'):
i = 0
for dirpath, _, filename in os.walk(path):
for ii in filename:
i += os.path.getsize(os.path.join(dirpath, ii))
memory = i / 1024. / 1024. / 1024.
return memory
def get_free_space(folder):
""" Return folder/drive free space (in bytes)
"""
if platform.system() == 'Windows':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
return free_bytes.value/1024/1024/1024
else:
st = os.statvfs(folder)
return st.f_bavail * st.f_frsize/1024/1024/1024.
def get_access_path():
response_code = 0
conf_path = ''
log_path = ''
log_path_tmp = ''
process_tmp = []
# prefix_path_index = 0
tmp1 = subprocess.Popen(['ps -efw'], stdout=subprocess.PIPE, shell=True)
tmp2 = subprocess.Popen(
['grep nginx'], stdin=tmp1.stdout, stdout=subprocess.PIPE, shell=True)
ps_tmp = tmp2.stdout.read()
lines = ps_tmp.split('\n')
for line in lines:
if 'master process' in line:
process_tmp = line.split()
# prefix_path_index = process_tmp.index('process') + 1
break
if process_tmp == []:
response_code = NO_NGINX
return response_code, conf_path, log_path
pid = process_tmp[1]
tmp5 = subprocess.Popen(['ls -l /proc/' + pid + "/exe"], stdout=subprocess.PIPE, shell=True)
proc_tmp = tmp5.stdout.read()
nginx_path = proc_tmp.split()[-1]
prefix_path = nginx_path.split('sbin')[0]
find_sp = subprocess.Popen(
['find ' + prefix_path + ' -name nginx.conf'], stdout=subprocess.PIPE, shell=True)
conf_path = find_sp.stdout.read().strip('\n')
print("conf_path", conf_path)
if conf_path == '':
response_code = NO_NGINX_CONF
return response_code, conf_path, log_path
tmp3 = subprocess.Popen(['cat ' + conf_path],
stdout=subprocess.PIPE, shell=True)
tmp4 = subprocess.Popen(
['grep access.log'], stdin=tmp3.stdout, stdout=subprocess.PIPE, shell=True)
cat_res = tmp4.stdout.read().split('\n')
if cat_res == '':
response_code = NO_NGINX_LOG
return response_code, conf_path, log_path
# cat_res ='''
# access_log logs/access.log main buffer=128k flush=5s;
# access_log logs/access_for_big_data.log bigData buffer=128k flush=5s;
# '''
# conf_path = "/opt/huawei/openred/nginx/conf/nginx.conf"
for line in cat_res:
if "main" in line:
log_path_tmp = line.split()[1]
break
print("log_path_tmp", log_path_tmp)
# print(cat_res)
# for i in cat_res:
# if len(i) > 2:
# log_path_tmp = re.match(r'^(\S*)\/(access\.log)', i)
# if log_path_tmp != None:
# log_path = log_path_tmp.group(0)
# print(log_path)
if log_path_tmp == '':
response_code = NO_NGINX_LOG
return response_code, conf_path, log_path
if log_path_tmp[0] == '/':
log_path = log_path_tmp
else:
log_path = conf_path.replace("conf/nginx.conf", log_path_tmp)
response_code = SUCCESS
return response_code, conf_path, log_path
def cp_file(src_path, dir_path):
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
ip = ip.replace('.', '-')
cmd = ['cp', src_path, dir_path]
subprocess.Popen(cmd, stdout=subprocess.PIPE)
def write_log(content):
with open("/tmp/get-logs-soc.txt", 'a') as f:
f.write(content)
f.write('\n')
print(content)
if __name__ == "__main__":
write_log("[INFO] BEGIN!!!!!!")
response, conf_path, log_path = get_access_path()
print(log_path)
print(conf_path)
if response == 6:
free_space = get_free_space('/tmp')
file_size = check_memory(log_path)
if free_space > file_size:
file_path = '/tmp/nginx_proof'
tar_file = '/tmp/nginx_proof.tar.gz'
if not os.path.exists(file_path):
os.makedirs(file_path)
cp_file(conf_path,file_path)
cp_file(log_path,file_path)
os.system("tar czf "+ tar_file + ' -C ' + file_path + ' .')
else:
write_log("[ERROR] Free spcace is limited!\n[INFO] free space: {free_space} \n[INFO] files_size:{file_size}".format(free_space=free_space, file_size=file_size))
else:
write_log("[ERROR]Fail to find log")
print('FINISH!')
|
the-stack_106_25766 | import eel
import logging
from core.cmp.functions import analize_grammar, make_tree
@eel.expose
def pipeline(data):
values = analize_grammar(data)
fd = open("./web/template.html", 'r', encoding='UTF-8')
data = fd.read()
fd.close()
    sec = data.split('%s')
    html = []
    for i in range(len(sec)):
        html.append(sec[i])
        if i < len(values):
            html.append(values[i])
    return ''.join(html)
@eel.expose
def derivationTree(text, w, parser_name):
d = make_tree(text, w, parser_name)
return d
def main():
eel.init('web')
eel_options = {'port': 8045}
eel.start('index.html', size=(1000, 860), options=eel_options, block=False, suppress_error=True)
while True:
eel.sleep(0.1)
if __name__ == '__main__':
main() |
the-stack_106_25767 |
import os
from conans import ConanFile, CMake
class Protobuf(ConanFile):
name = "protobuf"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False]}
default_options = {"shared": False}
exports = "*"
generators = "cmake", "cmake_find_package"
requires = "zlib/0.1@xbuild/scenario"
def build(self):
cmake = CMake(self)
settings = "|".join(map(str, [self.settings.os, self.settings.arch, self.settings.compiler, self.settings.build_type]))
options = "|".join(map(str, ["shared={}".format(self.options.shared)]))
cmake.definitions["MESSAGE:STRING"] = "|".join([settings, options])
cmake.configure()
cmake.build()
def package(self):
self.copy("*.h", dst="include", keep_path=True)
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
self.copy("*protoc_exe*", src="bin", dst="bin", keep_path=False)
self.copy("*.cmake", src="cmake", dst="lib/cmake", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["protobuf"]
self.cpp_info.exes = ["protoc_exe"]
self.cpp_info.build_modules = [os.path.join("lib", "cmake", "macro.cmake"),]
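# A possible local test drive (assuming the standard Conan 1.x CLI; the
# version "0.1" below is arbitrary, since the recipe itself does not pin one):
#     conan create . protobuf/0.1@xbuild/scenario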
|
the-stack_106_25768 | import os
import logging
import time
import pathlib
import condoloader
def main(datadir):
target = condoloader.CondoLoader()
if target.databaseisready():
target.loadpluto_load(datadir)
target.loadcondo_load(datadir)
target.loadcondo()
else:
readymsg = ("{0}{1}"
"Target database isnt ready, check SDECONN environmental, "
"work tables, schema.sql one time setup{2}{3}".format('\n'
,'\n'
,'\n'
,'\n'))
logging.error(readymsg)
return 0
return target.bblcount
if __name__ == '__main__':
timestr = time.strftime("%Y%m%d-%H%M%S")
try:
targetlog = os.path.join(os.environ['TARGETLOGDIR']
,'loadcondo-{0}.log'.format(timestr))
except:
targetlog = os.path.join(os.getcwd()
,'loadcondo-{0}.log'.format(timestr))
logging.basicConfig(filename=targetlog
,level=logging.INFO)
pdatadir = os.path.join(pathlib.Path(__file__).parent
,'data')
kount = main(pdatadir)
if (kount == 0 or kount is None):
logging.error('Failed to load any condos')
retval = 1
else:
logging.info('Successfully loaded {0} bbls to the condo table'.format(kount))
retval = 0
exit(retval) |
the-stack_106_25769 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def intensity_histogram( data, bins, x_label, y_label, legend, filename, markers , display=True):
plt.figure(figsize=(18,10), facecolor='w')
# first make a simple histogram
plt.hist( data, bins, alpha=0.7, label=legend)
# now plot the marker lines as vertical lines
low = markers[0] - markers[1]
high = markers[0] + markers[1]
plt.axvline(x=low,color='k', linestyle='--')
plt.axvline(x=high,color='k', linestyle='--')
plt.xlabel(x_label, fontsize=20)
plt.ylabel(y_label, fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=18)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.yscale('log', nonposy='clip')
plt.legend()
plt.savefig(filename)
if display:
plt.show()
def plot_equalized_template(img, filename, display=False, title=None):
#fig = plt.figure(figsize=(13,10), facecolor='w')
fig, ax = plt.subplots(1, figsize=(13,10), facecolor='w')
cntrs = ax.imshow( img, interpolation='none' )
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=10)
fig.colorbar(cntrs, ax=ax)
if title is not None:
ax.set_title(title, fontsize=18)
fig.savefig(filename)
if display:
fig.show()
def plot_run_summaries(scores, filename, display=False):
N = len(scores)
t = np.arange(N)
# first we plot a trace
fig,ax = plt.subplots( 1, figsize=(18,10), facecolor='w')
ax.semilogy( t, scores, '.', markersize=0.5 )
ax.semilogy( t, scores*0+0.5, '--',c='black' )
ax.semilogy( t, scores*0+1.0, '-',c='black' )
ax.semilogy( t, scores*0+2.0, '-.',c='black' )
ax.legend( ['Data','Z-score = 0.5', 'Z-score = 1.0','Z-score = 2.0', ] )
ax.set_xlabel('Image Index', fontsize=20 )
ax.set_ylabel('Template score', fontsize=20 )
ax.tick_params(axis='both', which='major', labelsize=18)
ax.tick_params(axis='both', which='minor', labelsize=15)
fig.savefig(filename+'.score_trace.png')
if display:
fig.show()
# now we do a histogram
fig2,ax2 = plt.subplots(1, figsize=(18,10), facecolor='w')
# first make a simple histogram
ax2.hist( scores, bins=100, range=(0,6), alpha=0.7, label='Template score')
# now plot the marker lines as vertical lines
ax2.set_xlabel('Z-score', fontsize=20)
    ax2.set_ylabel('Occurrence', fontsize=20)
ax2.tick_params(axis='both', which='major', labelsize=18)
ax2.tick_params(axis='both', which='minor', labelsize=12)
ax2.set_yscale('log', nonposy='clip')
ax2.legend()
fig2.savefig(filename+'.score_histogram.png')
if display:
fig2.show()
|
the-stack_106_25773 | """Support functions for the second-order update equation"""
from abc import ABC, abstractmethod
from typing import Optional
__all__ = ['Sigma', 'numerical_estimate_A']
class Sigma(ABC):
"""Function σ(t) for the second order update equation.
This is an abstract bases class. For any optimization that requires the
second-order update equation, an appropriate problem-specific subclass of
:class:`Sigma` must be implemented that defines
* the evaluation of σ(t) in :meth:`__call__`
* the update of any values that σ(t) depends on parametrically (typically:
any of the parameters A, B, C), in :meth:`refresh`.
An instantiation of that subclass is then passed as `sigma` to
:func:`.optimize_pulses`.
"""
@abstractmethod
def __call__(self, t): # pragma: nocover
"""Evaluate σ(t)"""
raise NotImplementedError()
@abstractmethod
def refresh(
self,
forward_states,
forward_states0,
chi_states,
chi_norms,
optimized_pulses,
guess_pulses,
objectives,
result,
): # pragma: nocover
"""Recalculate the parametric dependencies of σ(t)
This is called at the end of each control iteration, and may be used to
estimate the internal parameters in σ(t)
Args:
forward_states (list): For each objective, an array-like container
(cf. `storage` in :func:`.optimize_pulses`) of the initial
state forward-propagated under optimized controls from the
current iteration.
forward_states0 (list): The forward-propagated states under the
guess controls of the current iteration.
chi_states (list): The (normalized) boundary condition for the
backward-propagation in the current iteration, as returned by
the `chi_constructor` argument to :func:`.optimize_pulses`.
chi_norms (list): The norms of the un-normalized `chi_states`.
            optimized_pulses (list[numpy.ndarray]): list of optimized pulses
                from the current iteration
            guess_pulses (list[numpy.ndarray]): list of guess pulses for the
                current iteration
objectives (list[Objective]): The control objectives
result (Result): The result object, up-to-date for the current
iteration
"""
raise NotImplementedError()
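# --- Illustrative example (not part of the public API) ----------------------
# A minimal sketch of a concrete subclass, assuming the simplest possible
# functional form sigma(t) = -max(A, 0) with a constant parameter A. The class
# name and functional form are chosen here purely for demonstration; a real
# implementation would typically re-estimate A inside `refresh`, e.g. with
# `numerical_estimate_A` defined below.
class _ExampleConstantSigma(Sigma):
    def __init__(self, A):
        self._A = A

    def __call__(self, t):
        # sigma(t) does not actually depend on t in this sketch
        return -max(self._A, 0)

    def refresh(
        self,
        forward_states,
        forward_states0,
        chi_states,
        chi_norms,
        optimized_pulses,
        guess_pulses,
        objectives,
        result,
    ):
        # No parametric update in this sketch; see numerical_estimate_A below
        # for how A could be re-estimated from the propagated states.
        pass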
def _overlap(a, b) -> Optional[complex]:
"""Complex overlap of two quantum objects.
If `a`, `b` are not quantum objects or are not compatible, return None.
"""
try:
if a.type == b.type == 'oper':
if a.isherm:
return complex((a * b).tr())
else:
return complex((a.dag() * b).tr())
else:
return a.overlap(b)
except AttributeError:
return None
def numerical_estimate_A(
forward_states, forward_states0, chi_states, chi_norms, Delta_J_T
):
r"""Update the second-order parameter $A$.
Calculate the new value of $A$ according to the equation
.. math::
A^{(i+1)} = \frac{
\sum_k 2 \Re \Braket{\chi_k(T)}{\Delta\phi_k(T)} + \Delta J_T
}{
\sum_k \Braket{\Delta \phi_k(T)}{\Delta\phi_k(T)}
},
where $\Delta\phi_k$ is the difference of the `forward_states`
$\ket{\phi_k^{(i)}}$ propagated under the optimized pulse of iteration
$(i)$, and the `forward_states0` $\ket{\phi_k^{(i-1)}}$ propagated under
the guess pulse of iteration $(i)$ -- that is, the guess pulse of iteration
$(i-1)$; and $\Delta J_T$ is the difference of the final time functional,
.. math::
\Delta J_T
        = J_T(\{\ket{\phi_k^{(i)}(T)}\}) - J_T(\{\ket{\phi_k^{(i-1)}(T)}\}).
Args:
forward_states (list): For each objective, the result of a
forward-propagation with the optimized pulses of the current
iteration.
forward_states0 (list): For each objective, the result of a
forward-propagation with the guess pulses of the current iteration
chi_states (list): For each objective, the normalized boundary state
$\ket{\chi_k(T)}/\Abs{\ket{\chi_k(T)}}$ for the
backward-propagation with the guess pulse of the current iteration.
chi_norms (list): The norms of the `chi_states`
Delta_J_T (float): The value by which the final time functional
improved in the current iteration.
"""
n = len(forward_states0) # the number of objectives
Δϕ = [forward_states[k][-1] - forward_states0[k][-1] for k in range(n)]
Δϕ_nrmsq = [_overlap(Δϕ[k], Δϕ[k]).real for k in range(n)]
denom = sum(Δϕ_nrmsq)
if denom > 1.0e-30:
numer = (
sum(
[
(2 * chi_norms[k] * _overlap(chi_states[k], Δϕ[k])).real
for k in range(n)
]
)
+ Delta_J_T
)
return numer / denom
else:
return 0
|
the-stack_106_25774 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2017-2018 The CruZeta developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Dependency: python-bitcoinrpc
from test_framework.util import assert_equal, check_json_precision, \
initialize_chain, start_nodes, stop_nodes, wait_cruzetads, \
cruzetad_processes, rpc_port
from test_framework.authproxy import AuthServiceProxy
from test_framework.netutil import addr_to_hex, get_bind_addrs, all_interfaces
import os
import sys
import shutil
import tempfile
import traceback
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
try:
pid = cruzetad_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
finally:
stop_nodes(nodes)
wait_cruzetads()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
    Start a node with rpcallowip, and request getinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
nodes = start_nodes(1, tmpdir, [base_args])
try:
# connect to node through non-loopback interface
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
node = AuthServiceProxy(url)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_cruzetads()
def run_test(tmpdir):
assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave cruzetads and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing cruzetad/cruzeta-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_cruzetads()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
|
the-stack_106_25775 | from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from .models import Post,Profile,Comment,Like,User,Follow
from .forms import NewPostForm,ProfileForm,CommentForm,LikeForm,FollowForm
from django.db.models import Q
# Create your views here.
@login_required(login_url='/accounts/login/')
def timeline(request):
posts= Post.objects.all().order_by("-id")
profiles= Profile.objects.all()
current_user = request.user
comments=Comment.objects.all()
likes = Like.objects.all()
for post in posts:
num_likes=0
for like in likes:
if post.id == like.post.id:
num_likes +=1
post.likes = num_likes
post.save()
if request.method == 'POST' and 'liker' in request.POST:
post_id = request.POST.get("liker")
likeform = LikeForm(request.POST)
if likeform.is_valid():
post_id = int(request.POST.get("liker"))
post = Post.objects.get(id = post_id)
like = likeform.save(commit=False)
like.username = request.user
like.post = post
like.control = str(like.username.id)+"-"+str(like.post.id)
like.save()
print("like saved")
return redirect("timeline")
else:
likeform = LikeForm()
if request.method == 'POST' and 'unliker' in request.POST:
post_id = request.POST.get("unliker")
post = Post.objects.get(pk=post_id)
control = str(request.user.id)+"-"+str(post.id)
like_delete = Like.objects.get(control=control)
like_delete.delete()
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
post_id = int(request.POST.get("idpost"))
post = Post.objects.get(id = post_id)
comment = form.save(commit=False)
comment.username = request.user
comment.post = post
comment.save()
return redirect('timeline')
else:
form = CommentForm()
posts= Post.objects.all().order_by("-id")
likes = Like.objects.all()
likez = Like.objects.values_list('control', flat=True)
likez =list(likez)
return render(request,'timeline.html',{"posts":posts,"profiles":profiles,"current_user":current_user,"comments":comments,"form":form, "likeform":likeform, "likes":likes,"likez":likez,})
@login_required(login_url='/accounts/login/')
def search_results(request):
if 'search' in request.GET and request.GET["search"]:
search_term = request.GET.get("search")
searched_users = Profile.search_profile(search_term)
message=f"Search results for: {search_term}"
return render(request,'search.html',{"message":message,"users":searched_users})
else:
message="You haven't searched for any term."
return render(request,'search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def explore(request):
posts = Post.objects.all()
profiles= Profile.objects.all()[:3]
# form=CommentForm()
# comments=Comment.objects.all()
return render(request,"explore.html",{"posts":posts,"profiles":profiles,})
@login_required(login_url='/accounts/login/')
def profile(request,id):
user_object = request.user
current_user = Profile.objects.get(username__id=request.user.id)
user = Profile.objects.get(username__id=id)
posts = Post.objects.filter(upload_by = user)
follows = Follow.objects.all()
if request.method == 'POST' and 'follower' in request.POST:
print("follow saved")
followed_user_id = request.POST.get("follower")
followform = FollowForm(request.POST)
if followform.is_valid():
followed_user_id = int(request.POST.get("follower"))
current_user = Profile.objects.get(username__id=request.user.id)
follow = followform.save(commit=False)
follow.username = request.user
followed_user = User.objects.get(pk=followed_user_id)
print(followed_user)
follow.followed = followed_user
follow.follow_id = str(follow.username.id)+"-"+str(follow.followed.id)
follow.save()
print("follow saved")
return redirect("profile", user.username.id)
else:
followform = FollowForm()
if request.method == 'POST' and 'unfollower' in request.POST:
followed_user_id = request.POST.get("unfollower")
followed_user = User.objects.get(pk=followed_user_id)
follow_id = str(request.user.id)+"-"+str(followed_user.id)
follow_delete = Follow.objects.get(follow_id=follow_id)
follow_delete.delete()
follows = Follow.objects.all()
followz = Follow.objects.values_list('follow_id', flat=True)
followz =list(followz)
follower =0
following = 0
for follow in followz:
follow = follow.split("-")
if follow[0] == str(user.username.id):
following+=1
if follow[-1] == str(user.username.id):
follower+=1
return render(request, "profile.html", {"current_user":current_user,"posts":posts,"user":user,"user_object":user_object, "follows":follows, "followz":followz,"follower":follower,"following":following})
# def following(request):
# if request.method == 'POST' and 'follower' in request.POST:
# print("follow saved")
@login_required(login_url='/accounts/login/')
def new_post(request):
current_user = Profile.objects.get(username__id=request.user.id)
if request.method == 'POST':
form = NewPostForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.upload_by = current_user
post.save()
return redirect('timeline')
else:
form = NewPostForm()
return render(request, 'new_post.html', {"form": form})
@login_required(login_url='/accounts/login/')
def edit_profile(request):
current_user=request.user
user_edit = Profile.objects.get(username__id=current_user.id)
if request.method =='POST':
form=ProfileForm(request.POST,request.FILES,instance=request.user.profile)
if form.is_valid():
form.save()
print('success')
else:
form=ProfileForm(instance=request.user.profile)
print('error')
return render(request,'edit_profile.html',locals()) |
the-stack_106_25783 |
import os
import cv2
import random
import tensorflow as tf
import os.path as osp
import numpy as np
import cityscapesscripts.helpers.labels as CSLabels # to be deprecated
from glob import glob
# physical_devices = tf.config.list_physical_devices('GPU')
# try:
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
# tf.config.experimental.set_memory_growth(physical_devices[1], True)
# tf.config.experimental.set_memory_growth(physical_devices[2], True)
# tf.config.experimental.set_memory_growth(physical_devices[3], True)
# except:
# # Invalid device or cannot modify virtual devices once initialized.
# pass
CLASSES = ('background', 'crack', 'efflorescence', 'rebar_exposure', 'spalling',)
PALETTE = [[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 255, 255], [255, 0, 255],]
class Concrete_Damage_Dataset_as_Cityscapes:
"""
Cityscapes dataset.
The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
Code Reference :
[1] https://github.com/open-mmlab/mmsegmentation
"""
def __init__(self, data_dir, data_type = 'train'):
self.classes = CLASSES
self.palette = PALETTE
self.data_dir = data_dir
self.img_dir = osp.join(data_dir, 'leftImg8bit', data_type)
self.ann_dir = osp.join(data_dir, 'gtFine', data_type)
self.img_suffix = '_leftImg8bit.png'
self.seg_map_suffix = '_gtFine_labelIds.png'
# load annotations
self.img_infos = self.load_img_infos()
def load_img_infos(self):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
Returns:
list[dict]: All image info of dataset.
Code Reference :
[1] https://github.com/open-mmlab/mmsegmentation
"""
img_infos = []
img_list = []
for _, _, files in os.walk(self.img_dir):
for file in files:
if file.endswith(self.img_suffix):
img_list.append(file)
for img in img_list:
img_info = dict(filename=img)
seg_map = img.replace(self.img_suffix, self.seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print(img_infos)
return img_infos
def prepare_img(self, idx):
""" Read image from the dataset directory
Args:
Returns:
"""
img_filename = self.img_infos[idx]['filename']
img_prefix = img_filename.split('_')[0]
img_path = osp.join(self.img_dir, img_filename)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def prepare_seg_mask(self, idx):
""" Read segmentation mask from the annotation directory
Args:
idx (int): Index of data.
Returns:
            seg (ndarray): the raw label map as loaded with cv2.IMREAD_UNCHANGED
"""
seg_filename = self.img_infos[idx]['ann']['seg_map']
seg_prefix = seg_filename.split('_')[0]
seg_path = osp.join(self.ann_dir, seg_filename)
seg = cv2.imread(seg_path, cv2.IMREAD_UNCHANGED)
return seg
# @staticmethod
# def _convert_to_label_id(seg):
# """Convert trainId to id for cityscapes."""
# seg_copy = seg.copy()
# for label in CSLabels.labels:
# # print(label.name)
# seg_copy[seg == label.id] = label.trainId
# return seg_copy
def __len__ (self) :
return len(self.img_infos)
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
Reference :
[1] https://github.com/wutianyiRosun/CGNet/blob/master/dataset/cityscapes.py
"""
data = {}
# data['image'] = self.prepare_img(idx)
# data['segmentation_mask'] = self.prepare_seg_mask(idx)
image = self.prepare_img(idx)
label = self.prepare_seg_mask(idx)
        f_scale = 1 + random.randint(0, 5) / 10.0  # random resize between 1.0x and 1.5x
img_h, img_w = label.shape
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
img_h_rsz, img_w_rsz = label.shape
h_off = random.randint(0, img_h_rsz - img_h)
w_off = random.randint(0, img_w_rsz - img_w)
#roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
image = np.asarray(image[h_off : h_off+img_h, w_off : w_off+img_w], np.float32)
label = np.asarray(label[h_off : h_off+img_h, w_off : w_off+img_w], np.float32)
if np.random.uniform() > 0.5 :
image = image*np.random.uniform(0.75, 1.25)
data['image'] = image
data['segmentation_mask'] = label
return data
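# A minimal usage sketch (the directory layout follows the Cityscapes-style
# convention assumed above: <data_dir>/leftImg8bit/<split> for images and
# <data_dir>/gtFine/<split> for label maps; the path below is a placeholder):
#
#     dataset = Concrete_Damage_Dataset_as_Cityscapes('/path/to/data', 'train')
#     sample = dataset[0]
#     print(sample['image'].shape, sample['segmentation_mask'].shape)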
|
the-stack_106_25784 | import json
from collections import defaultdict
name_box_id = defaultdict(list)
id_name = dict()
f = open(
r"F:\2_doc\7_datasets\COCO2017\annotations\instances_train2017.json",
encoding='utf-8')
data = json.load(f)
annotations = data['annotations']
for ant in annotations:
id = ant['image_id']
name = r'F:\2_doc\7_datasets\COCO2017\train2017\%012d.jpg' % id
cat = ant['category_id']
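    # Remap the 80 COCO category ids (non-contiguous within 1-90) onto
    # contiguous class indices 0-79, as expected by the YOLO-style txt list.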
if cat >= 1 and cat <= 11:
cat = cat - 1
elif cat >= 13 and cat <= 25:
cat = cat - 2
elif cat >= 27 and cat <= 28:
cat = cat - 3
elif cat >= 31 and cat <= 44:
cat = cat - 5
elif cat >= 46 and cat <= 65:
cat = cat - 6
elif cat == 67:
cat = cat - 7
elif cat == 70:
cat = cat - 9
elif cat >= 72 and cat <= 82:
cat = cat - 10
elif cat >= 84 and cat <= 90:
cat = cat - 11
name_box_id[name].append([ant['bbox'], cat])
f = open(r'f:/1_code/python/keras-YOLOv3-mobilenet/coco2017/2007_train.txt', 'w')
for key in name_box_id.keys():
f.write(key)
box_infos = name_box_id[key]
for info in box_infos:
x_min = int(info[0][0])
y_min = int(info[0][1])
x_max = x_min + int(info[0][2])
y_max = y_min + int(info[0][3])
box_info = " %d,%d,%d,%d,%d" % (
x_min, y_min, x_max, y_max, int(info[1]))
f.write(box_info)
f.write('\n')
f.close()
|
the-stack_106_25786 | # -*- coding: utf-8 -*-
# Investigating ConvNets to create high quality xG models - https://www.opengoalapp.com/xg-with-cnns-full-study
# by @openGoalCharles
# Tested with tensorflow 2.2.0 - some of the visualisations will definitely need >= v2.0.0, not sure about the core code
# The model is lightweight - will train in a few minutes with a low end GPU, training on CPU is definitely acceptable
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from ShotsToArrays import ShotsToArrays
from LoadEvents import LoadEvents
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.calibration import calibration_curve
from sklearn.isotonic import IsotonicRegression
from sklearn.metrics import log_loss
from sklearn.linear_model import LogisticRegression
#-----------LOAD THE DATA---------------------
# choose one of the following options:
#shots = LoadEvents('shots') # call StatsBomb API for the latest open shot data - chuck everything into the model!
#DataIn, DataOut = ShotsToArrays(shots) # process the shot dataframe into set of 3 channel arrays and accompanying output data
# OR....
# pre-load a snapshot of the data (June 2020) that has already been processed
loaded = np.load('data/input_compressed.npz')
DataIn = loaded['a']
DataOut = pd.read_pickle("data/output_arrays.pkl")
#---------------------------------------------
# split data into train and test
x_train, x_test, out_train, out_test = train_test_split(DataIn, DataOut, test_size=0.3, random_state = 42)
# split into a further set for training post-calibration e.g. isotonic regression
#x_train, x_val, out_train, out_val = train_test_split(x_train, out_train, test_size = 0.3, random_state = 42)
x_test = np.float32(x_test)
x_train = np.float32(x_train)
y_train= out_train.loc[:,'shot_outcome'].values.astype('float32')
y_test= out_test.loc[:,'shot_outcome'].values.astype('float32')
#-------DEFINE CNN MODEL--------------------------
activation = tf.keras.layers.LeakyReLU(alpha=0.1)
input_img = Input(shape=(40, 80, 3), dtype ='float16' )
block1 = Conv2D(32, (9, 9), activation=activation, padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(block1)
block2 = Conv2D(64, (5, 5), activation=activation, padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(block2)
block3 = Conv2D(64, (9, 9), activation=activation, padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(block3)
x = Flatten()(x)
x = Dense(48, activation = activation)(x)
output = Dense(1, activation = 'sigmoid')(x)
model = Model(input_img, output)
#--------------------------------------------------
#------COMPILE MODEL AND SET CALLBACKS FOR EARLY STOPPING AND REDUCTION OF LEARNING RATE -----------------
optimizer = 'sgd'
model.compile(optimizer=optimizer, loss='binary_crossentropy')
early_stop = EarlyStopping(monitor='val_loss', min_delta=0,
patience=10, verbose=0, mode='auto',
baseline=None, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.00001)
#----------------------------------------------------------------------------------------------------------
#-------TRAIN MODEL--------------------------------
model.fit(x_train, y_train,
epochs=200,
batch_size=32,
shuffle=True,
validation_data=(x_test, y_test),
callbacks = [early_stop, reduce_lr])
#--------------------------------------------------
# generate model xG predictions of of training set
y_pred = model.predict(x_test)
# plot calibration curve for model
ccurve = calibration_curve(y_test, y_pred, n_bins = 15) # returns true proportion [0] and average predicted prob [1]
plt.scatter(ccurve[1],ccurve[0])
plt.title('CNN model Calibration Curve - Final with Optimization')
plt.xlabel('Average of model predicted xG')
plt.ylabel('Average of actual goal outcome')
x = [0,1]
y = [0,1]
plt.plot(x,y, '--')
plt.show()
# plot comparison of CNN model predictions vs StatsBomb's predictions on the same data
sb_xg = out_test.loc[:,'statsbomb_xg']
plt.scatter(y_pred,sb_xg, alpha = 0.1)
plt.show()
# plot calibration curve for StatsBomb model on the test data
sb_ccurve = calibration_curve(y_test, sb_xg, n_bins = 15)
plt.scatter(sb_ccurve[1],sb_ccurve[0])
plt.title('StatsBomb model Calibration Curve')
plt.xlabel('Average of model predicted xG')
plt.ylabel('Average of actual goal outcome')
x = [0,1]
y = [0,1]
plt.plot(x,y, '--')
plt.show()
# calculate benchmark log loss values
ll_model = log_loss(y_test, y_pred) # CNN model
ll_sb = log_loss(y_test, sb_xg) # StatsBomb model
ll_fixed = log_loss(y_test, np.repeat(np.mean(y_train),len(y_test))) # fixed mean of training data prediction
ll_rand = log_loss(y_test, np.random.rand(len(y_test))) # random number
# do same for perfect model
goal_list = []
for shot in sb_xg: # simulate goal/no goal outcome assuming SB model is ground truth
if np.random.rand() <=shot:
goal_list.append(1)
else:
goal_list.append(0)
ll_perfect = log_loss(goal_list, sb_xg) # log loss for perfect model with representative data
## train a logistic regression model using x and y locs as inputs
#X = np.array(out_train.loc[:,['loc_x', 'loc_y']]) # requires data processed from ShotsToArrays
#lr_model = LogisticRegression()
#model_out = lr_model.fit(X,y_train)
#lr_test = model_out.predict_proba(np.array(out_test.loc[:,['loc_x', 'loc_y']]))[:,1] # generate the probabilities
#ll_lr = log_loss(y_test, lr_test) # calculate log loss of logistic regression model
# plot a calibration curve for the logistic regression model
#lr_ccurve = calibration_curve(y_test, lr_test, n_bins = 15)
#plt.scatter(lr_ccurve[0],lr_ccurve[1])
#x = [0,1]
#y = [0,1]
#plt.plot(x,y, '--')
#plt.show()
#------------ISOTONIC REGRESSION EXPERIMENT---------------------
# # uncomment this block and uncommment line 35 (additional split of data)
#ir = IsotonicRegression()
#ir.fit(model.predict(x_val).squeeze(),y_val)
#calibrated = ir.predict(xG.squeeze())
#
#calibrated = np.clip(calibrated,0.001,0.999) # stop divide by zero error
#
#calib_ccurve = calibration_curve(y_test, calibrated, n_bins = 15)
#plt.scatter(calib_ccurve[1],calib_ccurve[0])
#plt.title('CNN model with isotonic regression correction Calibration Curve')
#plt.xlabel('Average of model predicted xG')
#plt.ylabel('Average of actual goal outcome')
#x = [0,1]
#y = [0,1]
#plt.plot(x,y, '--')
#plt.show()
#
#ll_cal = log_loss(y_test, calibrated)
#----------------------------------------------------------------
#-------------------CNN VISUALISATION PLOTTING---------------------------------------
for i in range(5): # plot a selection of input images from the test set
# display original images
plt.imshow(x_test[i,:,:,2]) # 4th argument is the channel number so select 0, 1 or 2
plt.show()
layer_dict = dict([(layer.name, layer) for layer in model.layers]) # dictionary of layers in model with names
# The dimensions of our input image
img_width = 40
img_height = 80
#Our target layer: we will visualize the filters from this layer.
# See `model.summary()` for list of layer names, if you want to change this.
layer_name = "conv2d_1"
# Set up a model that returns the activation values for our target layer
layer = model.get_layer(name=layer_name)
feature_extractor = tf.keras.Model(inputs=model.inputs, outputs=layer.output)
# the following is adapted from https://keras.io/examples/vision/visualizing_what_convnets_learn/
# function to calculate loss
def compute_loss(input_image, filter_index):
activation = feature_extractor(input_image)
# We avoid border artifacts by only involving non-border pixels in the loss.
filter_activation = activation[:, 2:-2, 2:-2, filter_index]
return tf.reduce_mean(filter_activation)
# define gradient ascent function - NOTE THIS DEFINITELY REQUIRES TF v2
@tf.function
def gradient_ascent_step(img, filter_index, learning_rate):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img, filter_index)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads = tf.math.l2_normalize(grads)
img += learning_rate * grads
return loss, img
# initialise image with some noise
def initialize_image():
# We start from a gray image with some random noise
img = tf.random.uniform((1, img_width, img_height, 3))
return (img)
# create the visualisation - can play around with the number of iterations and learning rate
def visualize_filter(filter_index):
# We run gradient ascent for 20 steps
iterations = 30
learning_rate = 10.0
img = initialize_image()
for iteration in range(iterations):
loss, img = gradient_ascent_step(img, filter_index, learning_rate)
# Decode the resulting input image
img = deprocess_image(img[0].numpy())
return loss, img
# convert image into viewable form
def deprocess_image(img):
# Normalize array: center on 0.,
img -= img.mean()
img /= img.std() + 1e-5
img *= 0.15 # set variance - can play around with this value to crisp up images
# Clip to [0, 1]
img += 0.5
img = np.clip(img, 0, 1)
# Convert to RGB array
img *= 255
img = np.clip(img, 0, 255).astype("uint8")
return img
# plot some maximally activated inputs!
for filter_no in range(64): # make sure this value matches number of filters in the layer selected
loss, img = visualize_filter(filter_no)
plt.show()
plt.imshow(img[:,:,2])
plt.show()
print(filter_no)
|
the-stack_106_25788 | import struct
def ieee_single_encode(number: float) -> str:
packed = struct.pack("!f", number)
unpacked = [f"{b:b}".rjust(8, "0") for b in packed]
encoded = "".join(unpacked)
return encoded
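# Quick sanity check: 1.0 has sign bit 0, biased exponent 127 (01111111) and
# an all-zero mantissa, so
#     ieee_single_encode(1.0) == '00111111100000000000000000000000'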
if __name__ == "__main__":
with open("sideinfordeci.txt") as input_f, open(
"sideinforbina.txt", "w"
) as output_f:
for line in input_f:
number = float(line.rstrip())
encoded = ieee_single_encode(number)
output_f.write(f"{encoded}\n")
|
the-stack_106_25789 | # Reminder: For the history array, "cooperate" = 1, "defect" = 0
def forgivingCopycat(history):
round = history.shape[1]
if history[1,-1] == 0:
if round > 3:
if history [0, -1] == 1 and history [0,-2] == 0 and history [1, -2] == 1:
return "cooperate"
return "defect"
return "cooperate"
def detectRandomness(history):
round = history.shape[1]
if round <=16:
return False
randomness = 0
for i in range (1, 10):
if history[1,-i] == 0:
if history[0,-i-1] and history[0,-i-2] and history[0,-i-3]:
randomness+=1
if randomness>=2:
return True
return False
def detectSwitch(history):
round = history.shape[1]
if round <=10:
return False
if history[1,-1] and history[1, -2] ==0 and history[1,-3] and history[1, -4] ==0 and history[1,-5] and history[1, -6] ==0:
return True
if history[1,-1]==0 and history[1, -2] and history[1,-3] ==0 and history[1, -4] and history[1,-5] ==0and history[1, -6]:
return True
return False
def abuseSwitch(history, memory):
round = history.shape[1]
ABSOLUTION = 0
LASTABSOLUTION = 1
TACTIC = 2
COOLDOWN = 3
TOTALLYISNTSWITCH = 6
if history[1,-1]==0 and history[1, -2] ==0 and history[1,-3] ==0 and history[1, -4]==0:
memory[TACTIC] = "absolution"
memory[LASTABSOLUTION] = round
memory[COOLDOWN] = 3
memory[TOTALLYISNTSWITCH] = True
return "cooperate", memory
return "defect", memory
def goWithSwitch(history, memory):
round = history.shape[1]
ABSOLUTION = 0
LASTABSOLUTION = 1
TACTIC = 2
COOLDOWN = 3
UNTRUTHWORTHIED= 4
TOTALLYISNTSWITCH = 6
if history[1,-1]==0 and history[1, -2] ==0 and history[1,-3] ==0 and history[1, -4]==0:
sum = 0
for i in range (1, round-memory[LASTABSOLUTION]+1):
if history[1,-i]:
if history[0, -i]:
sum+=2
else:
sum+=4
else:
if history[0,-i]:
sum-=1
else:
sum+=0
if sum <= 0:
memory[TACTIC] = "untruthworthy"
memory[UNTRUTHWORTHIED]+=1
else:
memory[LASTABSOLUTION] = round
memory[TACTIC] = "absolution"
memory[ABSOLUTION] = False
if history[1, -1]:
return "defect", memory
return "cooperate", memory
def abuseRandomness(history, memory):
round = history.shape[1]
TACTIC = 2
COOLDOWN = 3
TOTALLYISNTRANDOM = 5
if round<=10:
return "defect", memory
sum = 0
for i in range (1, 11):
sum += 1-history[1, -i]
if sum ==10:
memory[TACTIC] = "absolution"
memory[COOLDOWN] = 3
memory[TOTALLYISNTRANDOM] = True
return "cooperate", memory
return "defect", memory
def untruthworthy(history, memory):
TACTIC = 2
COOLDOWN = 3
UNTRUTHWORTHIED = 4
if memory[UNTRUTHWORTHIED] >=3:
memory[TACTIC]= "reallyuntruthworthy"
memory[COOLDOWN] = 0
if history [1, -1] == 1 and history [1, -2] == 1:
memory[COOLDOWN] = 0
memory[TACTIC]= "priest"
return "cooperate", memory
return "defect", memory
def reallyuntruthworthy(history, memory):
TACTIC = 2
COOLDOWN = 3
UNTRUTHWORTHIED = 4
memory[COOLDOWN]+=1
if history [1, -1] == 1 and history [1, -2] == 1 and history [1, -3] == 1 :
memory[TACTIC]= "priest"
memory[COOLDOWN] = 0
return "cooperate", memory
if memory[COOLDOWN] >=8 and memory[UNTRUTHWORTHIED] >=3:
memory[TACTIC]= "untruthworthy"
memory[UNTRUTHWORTHIED]= -200
return "defect", memory
def absolution(history, memory):
TACTIC = 2
COOLDOWN = 3
UNTRUTHWORTHIED = 4
if memory[COOLDOWN] >0:
memory [COOLDOWN] -=1
return "cooperate", memory
sum = history [1, -1] + history [1, -2] + history [1, -3]
if sum >=1:
memory[TACTIC] = "priest"
memory[COOLDOWN] = 0
return "cooperate", memory
memory[TACTIC] = "reallyuntruthworthy"
return "defect", memory
def priest(history, memory):
round = history.shape[1]
ABSOLUTION = 0
LASTABSOLUTION = 1
TACTIC = 2
COOLDOWN = 3
UNTRUTHWORTHIED = 4
memory[COOLDOWN] +=1
if history [1, -1]== 0 and history [1, -2]== 0 and history [1, -3]== 0:
if memory[ABSOLUTION]:
memory[LASTABSOLUTION] = round
memory[TACTIC] = "absolution"
memory[ABSOLUTION] = False
memory[COOLDOWN] = 3
else:
sum = 0
for i in range (1, round-memory[LASTABSOLUTION]+1):
if history[1,-i]:
if history[0, -i]:
sum+=2
else:
sum+=4
else:
if history[0,-i]:
sum-=1
else:
sum+=0
if sum <= 0:
memory[TACTIC] = "untruthworthy"
memory[UNTRUTHWORTHIED]+=1
else:
memory[LASTABSOLUTION] = round
memory[TACTIC] = "absolution"
memory[ABSOLUTION] = False
return forgivingCopycat(history), memory
def strategy(history, memory):
round = history.shape[1]
ABSOLUTION = 0
LASTABSOLUTION = 1
TACTIC = 2
COOLDOWN = 3
UNTRUTHWORTHIED = 4
TOTALLYISNTRANDOM = 5
TOTALLYISNTSWITCH = 6
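    # memory slot layout (indices defined above): [0] absolution still
    # available, [1] round of the last absolution, [2] current tactic name,
    # [3] cooldown counter, [4] count of "untruthworthy" verdicts so far,
    # [5] opponent flagged as random, [6] opponent flagged as a switcher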
if round == 0:
mem = []
mem.append(True)
mem.append(True)
mem.append(0)
mem.append(0)
mem.append(0)
mem.append(False)
mem.append(False)
return "cooperate", mem
if round == 1:
return "cooperate", memory
if round == 2:
return "cooperate", memory
if round == 3:
memory[TACTIC] = "priest"
if history[1, -1] ==0 or history[1, -2] ==0 or history[1, -3] ==0:
if history[1, -1] ==0 and history[1, -2] ==0 and history[1, -3] ==0:
memory[ABSOLUTION] = False
return "defect", memory
else:
return "cooperate", memory
if detectSwitch(history):
if memory[TOTALLYISNTSWITCH]:
memory[TACTIC] = "goWithSwitch"
else:
memory[TACTIC] = "abuseSwitch"
if memory[TOTALLYISNTRANDOM] == 0 and detectRandomness(history):
memory[TACTIC] = "abuseRandomness"
if memory[TACTIC] == "priest":
return priest(history, memory)
if memory[TACTIC] == "abuseSwitch":
return abuseSwitch(history, memory)
if memory[TACTIC] == "goWithSwitch":
return goWithSwitch(history, memory)
if memory[TACTIC] == "absolution":
return absolution(history, memory)
if memory[TACTIC] == "abuseRandomness":
return abuseRandomness(history, memory)
if memory[TACTIC] == "untruthworthy":
return untruthworthy(history, memory)
if memory[TACTIC] == "reallyuntruthworthy":
return reallyuntruthworthy(history, memory)
print(memory[TACTIC])
return "cooperate", memory
|
the-stack_106_25790 |
from itertools import product
import numpy as np
INV_SQRT_3 = 1.0 / np.sqrt(3.0)
ASIN_INV_SQRT_3 = np.arcsin(INV_SQRT_3)
def csgrid_GMAO(res):
"""
Return cubedsphere coordinates with GMAO face orientation
Parameters
----------
res : cubed-sphere Resolution
"""
CS = CSGrid(res, offset=-10)
lon = CS.lon_center.transpose(2, 0, 1)
lon_b = CS.lon_edge.transpose(2, 0, 1)
lat = CS.lat_center.transpose(2, 0, 1)
lat_b = CS.lat_edge.transpose(2, 0, 1)
lon[lon < 0] += 360
lon_b[lon_b < 0] += 360
for a in [lon, lon_b, lat, lat_b]:
for tile in [0, 1, 3, 4]:
a[tile] = a[tile].T
for tile in [3, 4]:
a[tile] = np.flip(a[tile], 1)
for tile in [3, 4, 2, 5]:
a[tile] = np.flip(a[tile], 0)
a[2], a[5] = a[5].copy(), a[2].copy() # swap north&south pole
return {'lon': lon, 'lat': lat, 'lon_b': lon_b, 'lat_b': lat_b}
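# A quick usage sketch: build a C48 grid and inspect the field shapes -- cell
# centers come back as (6, 48, 48) arrays and cell edges as (6, 49, 49):
#
#     grid = csgrid_GMAO(48)
#     print(grid['lon'].shape, grid['lon_b'].shape)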
class CSGrid(object):
"""Generator for cubed-sphere grid geometries.
CSGrid computes the latitutde and longitudes of cell centers and edges
on a cubed-sphere grid, providing a way to retrieve these geometries
on-the-fly if your model output data does not include them.
Attributes
----------
{lon,lat}_center : np.ndarray
lat/lon coordinates for each cell center along the cubed-sphere mesh
{lon,lat}_edge : np.ndarray
lat/lon coordinates for the midpoint of the edges separating each
element on the cubed-sphere mesh.
xyz_{center,edge} : np.ndarray
As above, except coordinates are projected into a 3D cartesian space
with common origin to the original lat/lon coordinate system, assuming
a unit sphere.
"""
def __init__(self, c, offset=None):
"""
Parameters
----------
c : int
Number edges along each cubed-sphere edge.
======= ====================
C Lat/Lon Resolution
------- --------------------
24 4 deg x 5 deg
48,45 2 deg x 2.5 deg
96,90 1 deg x 1.25 deg
192,180 0.5 deg x 0.625 deg
384,360 0.25 deg x 0.3125 deg
            720     0.125 deg x 0.15625 deg
offset : float (optional)
Degrees to offset the first faces' edge in the latitudinal
direction. If not passed, then the western edge of the first face
will align with the prime meridian.
"""
self.c = c
self.delta_y = 2. * ASIN_INV_SQRT_3 / c
self.nx = self.ny = c + 1
self.offset = offset
self._initialize()
def _initialize(self):
c = self.c
nx, ny = self.nx, self.ny
lambda_rad = np.zeros((nx, ny))
lambda_rad[ 0, :] = 3.*np.pi/4. # West edge
lambda_rad[-1, :] = 5.*np.pi/4. # East edge
theta_rad = np.zeros((nx, ny))
theta_rad[ 0, :] = -ASIN_INV_SQRT_3 + (self.delta_y*np.arange(c+1)) # West edge
theta_rad[-1, :] = theta_rad[0, :] # East edge
# Cache the reflection points - our upper-left and lower-right corners
lonMir1, lonMir2 = lambda_rad[0, 0], lambda_rad[-1, -1]
latMir1, latMir2 = theta_rad[0, 0], theta_rad[-1, -1]
xyzMir1 = latlon_to_cartesian(lonMir1, latMir1)
xyzMir2 = latlon_to_cartesian(lonMir2, latMir2)
xyzCross = np.cross(xyzMir1, xyzMir2)
norm = np.sqrt(np.sum(xyzCross**2))
xyzCross /= norm
for i in range(1, c):
lonRef, latRef = lambda_rad[0, i], theta_rad[0, i]
xyzRef = np.asarray(latlon_to_cartesian(lonRef, latRef, ))
xyzDot = np.sum(xyzCross*xyzRef)
xyzImg = xyzRef - (2. * xyzDot * xyzCross)
xsImg, ysImg, zsImg = xyzImg
lonImg, latImg = cartesian_to_latlon(xsImg, ysImg, zsImg)
lambda_rad[i, 0] = lonImg
lambda_rad[i, -1] = lonImg
theta_rad[i, 0] = latImg
theta_rad[i, -1] = -latImg
pp = np.zeros([3, c+1, c+1])
# Set the four corners
# print("CORNERS")
for i, j in product([0, -1], [0, -1]):
# print(i, j)
pp[:, i, j] = latlon_to_cartesian(lambda_rad[i, j], theta_rad[i, j])
# Map the edges on the sphere back to the cube. Note that all intersections are at x = -rsq3
# print("EDGES")
for ij in range(1, c+1):
# print(ij)
pp[:, 0, ij] = latlon_to_cartesian(lambda_rad[0, ij], theta_rad[0, ij])
pp[1, 0, ij] = -pp[1, 0, ij] * INV_SQRT_3 / pp[0, 0, ij]
pp[2, 0, ij] = -pp[2, 0, ij] * INV_SQRT_3 / pp[0, 0, ij]
pp[:, ij, 0] = latlon_to_cartesian(lambda_rad[ij, 0], theta_rad[ij, 0])
pp[1, ij, 0] = -pp[1, ij, 0] * INV_SQRT_3 / pp[0, ij, 0]
pp[2, ij, 0] = -pp[2, ij, 0] * INV_SQRT_3 / pp[0, ij, 0]
# # Map interiors
pp[0, :, :] = -INV_SQRT_3
# print("INTERIOR")
for i in range(1, c+1):
for j in range(1, c+1):
# Copy y-z face of the cube along j=1
pp[1, i, j] = pp[1, i, 0]
# Copy along i=1
pp[2, i, j] = pp[2, 0, j]
_pp = pp.copy()
llr, ttr = vec_cartesian_to_latlon(_pp[0], _pp[1], _pp[2])
lambda_rad, theta_rad = llr.copy(), ttr.copy()
# Make grid symmetrical to i = im/2 + 1
for j in range(1, c+1):
for i in range(1, c+1):
# print("({}, {}) -> ({}, {})".format(i, 0, i, j))
lambda_rad[i, j] = lambda_rad[i, 0]
for j in range(c+1):
for i in range(c//2):
isymm = c - i
# print(isymm)
avgPt = 0.5*(lambda_rad[i, j] - lambda_rad[isymm, j])
# print(lambda_rad[i, j], lambda_rad[isymm, j], avgPt)
lambda_rad[i, j] = avgPt + np.pi
lambda_rad[isymm, j] = np.pi - avgPt
avgPt = 0.5*(theta_rad[i, j] + theta_rad[isymm, j])
theta_rad[i, j] = avgPt
theta_rad[isymm, j] = avgPt
# Make grid symmetrical to j = im/2 + 1
for j in range(c//2):
jsymm = c - j
for i in range(1, c+1):
avgPt = 0.5*(lambda_rad[i, j] + lambda_rad[i, jsymm])
lambda_rad[i, j] = avgPt
lambda_rad[i, jsymm] = avgPt
avgPt = 0.5*(theta_rad[i, j] - theta_rad[i, jsymm])
theta_rad[i, j] = avgPt
theta_rad[i, jsymm] = -avgPt
# Final correction
lambda_rad -= np.pi
llr, ttr = lambda_rad.copy(), theta_rad.copy()
#######################################################################
## MIRROR GRIDS
#######################################################################
new_xgrid = np.zeros((c+1, c+1, 6))
new_ygrid = np.zeros((c+1, c+1, 6))
xgrid = llr.copy()
ygrid = ttr.copy()
new_xgrid[..., 0] = xgrid.copy()
new_ygrid[..., 0] = ygrid.copy()
# radius = 6370.0e3
radius = 1.
for face in range(1, 6):
for j in range(c+1):
for i in range(c+1):
x = xgrid[i, j]
y = ygrid[i, j]
z = radius
if face == 1:
# Rotate about z only
new_xyz = rotate_sphere_3D(x, y, z, -np.pi/2., 'z')
elif face == 2:
# Rotate about z, then x
temp_xyz = rotate_sphere_3D(x, y, z, -np.pi/2., 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi/2., 'x')
elif face == 3:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi, 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi/2., 'x')
if ((c % 2) != 0) and (j == c//2 - 1):
print(i, j, face)
new_xyz[0] = np.pi
elif face == 4:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi/2., 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi/2., 'y')
elif face == 5:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi/2., 'y')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, 0., 'z')
# print((x, y, z), "\n", new_xyz, "\n" + "--"*40)
new_x, new_y, _ = new_xyz
new_xgrid[i, j, face] = new_x
new_ygrid[i, j, face] = new_y
lon_edge, lat_edge = new_xgrid.copy(), new_ygrid.copy()
#######################################################################
## CLEANUP GRID
#######################################################################
for i, j, f in product(range(c+1), range(c+1), range(6)):
new_lon = lon_edge[i, j, f]
if new_lon < 0:
new_lon+= 2*np.pi
if np.abs(new_lon) < 1e-10:
new_lon = 0.
lon_edge[i, j, f] = new_lon
if np.abs(lat_edge[i, j, f]) < 1e-10:
lat_edge[i, j, f] = 0.
lon_edge_deg = np.rad2deg(lon_edge)
lat_edge_deg = np.rad2deg(lat_edge)
#######################################################################
## COMPUTE CELL CENTROIDS
#######################################################################
lon_ctr = np.zeros((c, c, 6))
lat_ctr = np.zeros((c, c, 6))
xyz_ctr = np.zeros((3, c, c, 6))
xyz_edge = np.zeros((3, c+1, c+1, 6))
for f in range(6):
for i in range(c):
last_x = (i == (c-1))
for j in range(c):
last_y = (j == (c-1))
# Get the four corners
lat_corner = [lat_edge[ i, j, f], lat_edge[i+1, j, f],
lat_edge[i+1, j+1, f], lat_edge[ i, j+1, f]]
lon_corner = [lon_edge[ i, j, f], lon_edge[i+1, j, f],
lon_edge[i+1, j+1, f], lon_edge[ i, j+1, f]]
# Convert from lat-lon back to cartesian
xyz_corner = np.asarray(vec_latlon_to_cartesian(lon_corner, lat_corner))
# Store the edge information
xyz_edge[:, i, j, f] = xyz_corner[:, 0]
if last_x:
xyz_edge[:, i+1, j, f] = xyz_corner[:, 1]
if last_x or last_y:
xyz_edge[:, i+1, j+1, f] = xyz_corner[:, 2]
if last_y:
xyz_edge[:, i, j+1, f] = xyz_corner[:, 3]
e_mid = np.sum(xyz_corner, axis=1)
e_abs = np.sqrt(np.sum(e_mid * e_mid))
if e_abs > 0:
e_mid = e_mid / e_abs
xyz_ctr[:, i, j, f] = e_mid
_lon, _lat = cartesian_to_latlon(*e_mid)
lon_ctr[i, j, f] = _lon
lat_ctr[i, j, f] = _lat
lon_ctr_deg = np.rad2deg(lon_ctr)
lat_ctr_deg = np.rad2deg(lat_ctr)
if self.offset is not None:
lon_edge_deg += self.offset
lon_ctr_deg += self.offset
#######################################################################
## CACHE
#######################################################################
self.lon_center = lon_ctr_deg
self.lat_center = lat_ctr_deg
self.lon_edge = lon_edge_deg
self.lat_edge = lat_edge_deg
self.xyz_center = xyz_ctr
self.xyz_edge = xyz_edge
def latlon_to_cartesian(lon, lat):
""" Convert latitude/longitude coordinates along the unit sphere to cartesian
coordinates defined by a vector pointing from the sphere's center to its
surface.
"""
x = np.cos(lat) * np.cos(lon)
y = np.cos(lat) * np.sin(lon)
z = np.sin(lat)
return x, y, z
vec_latlon_to_cartesian = np.vectorize(latlon_to_cartesian)
def cartesian_to_latlon(x, y, z, ret_xyz=False):
""" Convert a cartesian coordinate to latitude/longitude coordinates.
Optionally return the original cartesian coordinate as a tuple.
"""
xyz = np.array([x, y, z])
vector_length = np.sqrt(np.sum(xyz*xyz, axis=0))
xyz /= vector_length
x, y, z = xyz
if (np.abs(x) + np.abs(y)) < 1e-20:
lon = 0.
else:
lon = np.arctan2(y, x)
if lon < 0.:
lon += 2*np.pi
lat = np.arcsin(z)
# If not normalizing vector, take lat = np.arcsin(z/vector_length)
if ret_xyz:
return lon, lat, xyz
else:
return lon, lat
vec_cartesian_to_latlon = np.vectorize(cartesian_to_latlon)
def spherical_to_cartesian(theta, phi, r=1):
""" Convert spherical coordinates in the form (theta, phi[, r]) to
cartesian, with the origin at the center of the original spherical
coordinate system.
"""
x = r * np.cos(phi) * np.cos(theta)
y = r * np.cos(phi) * np.sin(theta)
z = r * np.sin(phi)
return x, y, z
vec_spherical_to_cartesian = np.vectorize(spherical_to_cartesian)
def cartesian_to_spherical(x, y, z):
""" Convert cartesian coordinates to spherical in the form
(theta, phi[, r]) with the origin remaining at the center of the
original spherical coordinate system.
"""
r = np.sqrt(x**2 + y**2 + z**2)
#theta = np.arccos(z / r)
theta = np.arctan2(y, x)
phi = np.arctan2(z, np.sqrt(x**2 + y**2))
# if np.abs(x) < 1e-16:
# phi = np.pi
# else:
# phi = np.arctan(y / x)
return theta, phi, r
vec_cartesian_to_spherical = np.vectorize(cartesian_to_spherical)
def rotate_sphere_3D(theta, phi, r, rot_ang, rot_axis='x'):
""" Rotate a spherical coordinate in the form (theta, phi[, r])
about the indicating axis, 'rot_axis'.
This method accomplishes the rotation by projecting to a
cartesian coordinate system and performing a solid body rotation
around the requested axis.
"""
cos_ang = np.cos(rot_ang)
sin_ang = np.sin(rot_ang)
x, y, z = spherical_to_cartesian(theta, phi, r)
if rot_axis == 'x':
x_new = x
y_new = cos_ang*y + sin_ang*z
z_new = -sin_ang*y + cos_ang*z
elif rot_axis == 'y':
x_new = cos_ang*x - sin_ang*z
y_new = y
z_new = sin_ang*x + cos_ang*z
elif rot_axis == 'z':
x_new = cos_ang*x + sin_ang*y
y_new = -sin_ang*x + cos_ang*y
z_new = z
theta_new, phi_new, r_new = cartesian_to_spherical(x_new, y_new, z_new)
return theta_new, phi_new, r_new
|
the-stack_106_25791 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module containing userid, chatid and log commands"""
from time import sleep
from telethon.tl.functions.channels import LeaveChannelRequest
from userbot import CMD_HELP, BOTLOG, BOTLOG_CHATID, bot
from userbot.events import register, errors_handler
@register(outgoing=True, pattern="^.userid$")
@errors_handler
async def useridgetter(target):
""" For .userid command, returns the ID of the target user. """
message = await target.get_reply_message()
if message:
if not message.forward:
user_id = message.sender.id
if message.sender.username:
name = "@" + message.sender.username
else:
name = "**" + message.sender.first_name + "**"
else:
user_id = message.forward.sender.id
if message.forward.sender.username:
name = "@" + message.forward.sender.username
else:
name = "*" + message.forward.sender.first_name + "*"
await target.edit("**Name:** {} \n**User ID:** `{}`".format(
name, user_id))
@register(outgoing=True, pattern="^.chatid$")
@errors_handler
async def chatidgetter(chat):
""" For .chatid, returns the ID of the chat you are in at that moment. """
await chat.edit("Chat ID: `" + str(chat.chat_id) + "`")
@register(outgoing=True, pattern=r"^.log(?: |$)([\s\S]*)")
@errors_handler
async def log(log_text):
""" For .log command, forwards a message or the command argument to the bot logs group """
if BOTLOG:
if log_text.reply_to_msg_id:
reply_msg = await log_text.get_reply_message()
await reply_msg.forward_to(BOTLOG_CHATID)
elif log_text.pattern_match.group(1):
user = f"#LOG / Chat ID: {log_text.chat_id}\n\n"
textx = user + log_text.pattern_match.group(1)
await bot.send_message(BOTLOG_CHATID, textx)
else:
await log_text.edit("`What am I supposed to log?`")
return
await log_text.edit("`Logged Successfully`")
else:
await log_text.edit("`This feature requires Logging to be enabled!`")
sleep(2)
await log_text.delete()
@register(outgoing=True, pattern="^.kickme$")
@errors_handler
async def kickme(leave):
""" Basically it's .kickme command """
await leave.edit("`Nope, no, no, I go away`")
await bot(LeaveChannelRequest(leave.chat_id))
@register(outgoing=True, pattern="^.unmutechat$")
@errors_handler
async def unmute_chat(unm_e):
""" For .unmutechat command, unmute a muted chat. """
try:
from userbot.modules.sql_helper.keep_read_sql import unkread
except AttributeError:
await unm_e.edit('`Running on Non-SQL Mode!`')
return
unkread(str(unm_e.chat_id))
await unm_e.edit("```Unmuted this chat Successfully```")
sleep(2)
await unm_e.delete()
@register(outgoing=True, pattern="^.mutechat$")
@errors_handler
async def mute_chat(mute_e):
""" For .mutechat command, mute any chat. """
try:
from userbot.modules.sql_helper.keep_read_sql import kread
except AttributeError:
await mute_e.edit("`Running on Non-SQL mode!`")
return
await mute_e.edit(str(mute_e.chat_id))
kread(str(mute_e.chat_id))
await mute_e.edit("`Shush! This chat will be silenced!`")
sleep(2)
await mute_e.delete()
if BOTLOG:
await mute_e.client.send_message(
BOTLOG_CHATID,
str(mute_e.chat_id) + " was silenced.")
@register(incoming=True)
async def keep_read(message):
""" The mute logic. """
try:
from userbot.modules.sql_helper.keep_read_sql import is_kread
except AttributeError:
return
kread = is_kread()
if kread:
for i in kread:
if i.groupid == str(message.chat_id):
await message.client.send_read_acknowledge(message.chat_id)
CMD_HELP.update({
"chat":
".chatid\
\nUsage: Fetches the current chat's ID\
\n\n.userid\
\nUsage: Fetches the ID of the user in reply, if its a forwarded message, finds the ID for the source.\
\n\n.log\
\nUsage: Forwards the message you've replied to in your bot logs group.\
\n\n.kickme\
\nUsage: Leave from a targeted group.\
\n\n.unmutechat\
\nUsage: Unmutes a muted chat.\
\n\n.mutechat\
\nUsage: Allows you to mute any chat."
})
|
the-stack_106_25793 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: gce_instance_template
short_description: create or destroy instance templates of Compute Engine of GCP.
description:
- Creates or destroy Google instance templates
of Compute Engine of Google Cloud Platform.
options:
state:
type: str
description:
- The desired state for the instance template.
default: "present"
choices: ["present", "absent"]
name:
type: str
description:
- The name of the GCE instance template.
required: True
aliases: [base_name]
size:
type: str
description:
- The desired machine type for the instance template.
default: "f1-micro"
source:
type: str
description:
- A source disk to attach to the instance.
Cannot specify both I(image) and I(source).
image:
type: str
description:
- The image to use to create the instance.
        Cannot specify both I(image) and I(source).
image_family:
type: str
description:
- The image family to use to create the instance.
If I(image) has been used I(image_family) is ignored.
Cannot specify both I(image) and I(source).
default: debian-8
disk_type:
type: str
description:
- Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
choices:
- pd-standard
- pd-ssd
default: pd-standard
disk_auto_delete:
description:
- Indicate that the boot disk should be
deleted when the Node is deleted.
default: true
type: bool
network:
type: str
description:
- The network to associate with the instance.
default: "default"
subnetwork:
type: str
description:
- The Subnetwork resource name for this instance.
can_ip_forward:
description:
- Set to C(yes) to allow instance to
send/receive non-matching src/dst packets.
type: bool
default: 'no'
external_ip:
type: str
description:
- The external IP address to use.
If C(ephemeral), a new non-static address will be
used. If C(None), then no external address will
be used. To use an existing static IP address
specify address name.
default: "ephemeral"
service_account_email:
type: str
description:
- service account email
service_account_permissions:
type: list
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
- >
Available choices are:
C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
C(storage-rw), C(taskqueue), C(userinfo-email).
automatic_restart:
description:
- Defines whether the instance should be
automatically restarted when it is
terminated by Compute Engine.
type: bool
preemptible:
description:
- Defines whether the instance is preemptible.
type: bool
tags:
type: list
description:
- a comma-separated list of tags to associate with the instance
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
description:
type: str
description:
- description of instance template
disks:
type: list
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
nic_gce_struct:
type: list
description:
- Support passing in the GCE-specific
formatted networkInterfaces[] structure.
disks_gce_struct:
type: list
description:
- Support passing in the GCE-specific
        formatted disks[] structure. Case sensitive.
see U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information
project_id:
type: str
description:
- your GCE project ID
pem_file:
type: path
description:
      - path to the pem file associated with the service account email.
This option is deprecated. Use 'credentials_file'.
credentials_file:
type: path
description:
- path to the JSON file associated with the service account email
subnetwork_region:
type: str
description:
- Region that subnetwork resides in. (Required for subnetwork to successfully complete)
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- JSON credentials strongly preferred.
author: "Gwenael Pellen (@GwenaelPellenArkeup) <[email protected]>"
'''
EXAMPLES = '''
# Usage
- name: Create instance template named foo
community.general.gce_instance_template:
name: foo
size: n1-standard-1
image_family: ubuntu-1604-lts
state: present
project_id: "your-project-name"
credentials_file: "/path/to/your-key.json"
service_account_email: "[email protected]"
# Example Playbook
- name: Compute Engine Instance Template Examples
hosts: localhost
vars:
service_account_email: "[email protected]"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: Create instance template
community.general.gce_instance_template:
name: my-test-instance-template
size: n1-standard-1
image_family: ubuntu-1604-lts
state: present
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
- name: Delete instance template
community.general.gce_instance_template:
name: my-test-instance-template
size: n1-standard-1
image_family: ubuntu-1604-lts
state: absent
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
# Example playbook using disks_gce_struct
- name: Compute Engine Instance Template Examples
hosts: localhost
vars:
service_account_email: "[email protected]"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: Create instance template
community.general.gce_instance_template:
name: foo
size: n1-standard-1
state: present
project_id: "{{ project_id }}"
credentials_file: "{{ credentials_file }}"
service_account_email: "{{ service_account_email }}"
disks_gce_struct:
- device_name: /dev/sda
boot: true
autoDelete: true
initializeParams:
diskSizeGb: 30
diskType: pd-ssd
sourceImage: projects/debian-cloud/global/images/family/debian-8
'''
RETURN = '''
'''
import traceback
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
from ansible.module_utils._text import to_native
def get_info(inst):
"""Retrieves instance template information
"""
return({
'name': inst.name,
'extra': inst.extra,
})
def create_instance_template(module, gce):
"""Create an instance template
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
instance template information
"""
# get info from module
name = module.params.get('name')
size = module.params.get('size')
source = module.params.get('source')
image = module.params.get('image')
image_family = module.params.get('image_family')
disk_type = module.params.get('disk_type')
disk_auto_delete = module.params.get('disk_auto_delete')
network = module.params.get('network')
subnetwork = module.params.get('subnetwork')
subnetwork_region = module.params.get('subnetwork_region')
can_ip_forward = module.params.get('can_ip_forward')
external_ip = module.params.get('external_ip')
service_account_permissions = module.params.get(
'service_account_permissions')
service_account_email = module.params.get('service_account_email')
on_host_maintenance = module.params.get('on_host_maintenance')
automatic_restart = module.params.get('automatic_restart')
preemptible = module.params.get('preemptible')
tags = module.params.get('tags')
metadata = module.params.get('metadata')
description = module.params.get('description')
disks_gce_struct = module.params.get('disks_gce_struct')
changed = False
# args of ex_create_instancetemplate
gce_args = dict(
name="instance",
size="f1-micro",
source=None,
image=None,
disk_type='pd-standard',
disk_auto_delete=True,
network='default',
subnetwork=None,
can_ip_forward=None,
external_ip='ephemeral',
service_accounts=None,
on_host_maintenance=None,
automatic_restart=None,
preemptible=None,
tags=None,
metadata=None,
description=None,
disks_gce_struct=None,
nic_gce_struct=None
)
gce_args['name'] = name
gce_args['size'] = size
if source is not None:
gce_args['source'] = source
if image:
gce_args['image'] = image
else:
if image_family:
image = gce.ex_get_image_from_family(image_family)
gce_args['image'] = image
else:
gce_args['image'] = "debian-8"
gce_args['disk_type'] = disk_type
gce_args['disk_auto_delete'] = disk_auto_delete
gce_network = gce.ex_get_network(network)
gce_args['network'] = gce_network
if subnetwork is not None:
gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
if can_ip_forward is not None:
gce_args['can_ip_forward'] = can_ip_forward
if external_ip == "ephemeral":
instance_external_ip = external_ip
elif external_ip == "none":
instance_external_ip = None
else:
try:
instance_external_ip = gce.ex_get_address(external_ip)
except GoogleBaseError as err:
            # lookup by address name failed; fall back to using the raw value as given
instance_external_ip = external_ip
gce_args['external_ip'] = instance_external_ip
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if perm not in gce.SA_SCOPES_MAP:
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
if service_account_email is not None:
ex_sa_perms.append({'email': str(service_account_email)})
else:
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
gce_args['service_accounts'] = ex_sa_perms
if on_host_maintenance is not None:
gce_args['on_host_maintenance'] = on_host_maintenance
if automatic_restart is not None:
gce_args['automatic_restart'] = automatic_restart
if preemptible is not None:
gce_args['preemptible'] = preemptible
if tags is not None:
gce_args['tags'] = tags
if disks_gce_struct is not None:
gce_args['disks_gce_struct'] = disks_gce_struct
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
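    # Illustrative example (hypothetical values): metadata = "{'startup-script': 'echo hi'}"
    # becomes, on libcloud < 0.15,
    #   {'items': [{'key': 'startup-script', 'value': 'echo hi'}]}
    # while on newer libcloud versions the parsed dict is passed through unchanged.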
if metadata:
if isinstance(metadata, dict):
md = metadata
else:
try:
md = literal_eval(str(metadata))
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError as e:
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError as e:
module.fail_json(msg='bad metadata syntax')
if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
items = []
for k, v in md.items():
items.append({"key": k, "value": v})
metadata = {'items': items}
else:
metadata = md
gce_args['metadata'] = metadata
if description is not None:
gce_args['description'] = description
instance = None
try:
instance = gce.ex_get_instancetemplate(name)
except ResourceNotFoundError:
try:
instance = gce.ex_create_instancetemplate(**gce_args)
changed = True
except GoogleBaseError as err:
module.fail_json(
msg='Unexpected error attempting to create instance {0}, error: {1}'
.format(
instance,
err.value
)
)
if instance:
json_data = get_info(instance)
else:
module.fail_json(msg="no instance template!")
return (changed, json_data, name)
def delete_instance_template(module, gce):
""" Delete instance template.
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
instance template information
"""
name = module.params.get('name')
current_state = "absent"
changed = False
# get instance template
instance = None
try:
instance = gce.ex_get_instancetemplate(name)
current_state = "present"
except GoogleBaseError as e:
json_data = dict(msg='instance template not exists: %s' % to_native(e),
exception=traceback.format_exc())
if current_state == "present":
rc = instance.destroy()
if rc:
changed = True
else:
module.fail_json(
msg='instance template destroy failed'
)
json_data = {}
return (changed, json_data, name)
def module_controller(module, gce):
''' Control module state parameter.
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
nothing
Exit:
AnsibleModule object exit with json data.
'''
json_output = dict()
state = module.params.get("state")
if state == "present":
(changed, output, name) = create_instance_template(module, gce)
json_output['changed'] = changed
json_output['msg'] = output
elif state == "absent":
(changed, output, name) = delete_instance_template(module, gce)
json_output['changed'] = changed
json_output['msg'] = output
module.exit_json(**json_output)
def check_if_system_state_would_be_changed(module, gce):
''' check_if_system_state_would_be_changed !
module : AnsibleModule object
gce: authenticated GCE libcloud driver
Returns:
system_state changed
'''
changed = False
current_state = "absent"
state = module.params.get("state")
name = module.params.get("name")
try:
gce.ex_get_instancetemplate(name)
current_state = "present"
except GoogleBaseError as e:
module.fail_json(msg='GCE get instancetemplate problem: %s' % to_native(e),
exception=traceback.format_exc())
if current_state != state:
changed = True
if current_state == "absent":
if changed:
output = 'instance template {0} will be created'.format(name)
else:
output = 'nothing to do for instance template {0} '.format(name)
if current_state == "present":
if changed:
output = 'instance template {0} will be destroyed'.format(name)
else:
output = 'nothing to do for instance template {0} '.format(name)
return (changed, output)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
name=dict(required=True, aliases=['base_name']),
size=dict(default='f1-micro'),
source=dict(),
image=dict(),
image_family=dict(default='debian-8'),
disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
disk_auto_delete=dict(type='bool', default=True),
network=dict(default='default'),
subnetwork=dict(),
can_ip_forward=dict(type='bool', default=False),
external_ip=dict(default='ephemeral'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
automatic_restart=dict(type='bool', default=None),
preemptible=dict(type='bool', default=None),
tags=dict(type='list'),
metadata=dict(),
description=dict(),
disks=dict(type='list'),
nic_gce_struct=dict(type='list'),
project_id=dict(),
pem_file=dict(type='path'),
credentials_file=dict(type='path'),
subnetwork_region=dict(),
disks_gce_struct=dict(type='list')
),
mutually_exclusive=[['source', 'image']],
required_one_of=[['image', 'image_family']],
supports_check_mode=True
)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_LIBCLOUD:
module.fail_json(
msg='libcloud with GCE support (0.17.0+) required for this module')
try:
gce = gce_connect(module)
except GoogleBaseError as e:
module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())
if module.check_mode:
(changed, output) = check_if_system_state_would_be_changed(module, gce)
module.exit_json(
changed=changed,
msg=output
)
else:
module_controller(module, gce)
if __name__ == '__main__':
main()
|
the-stack_106_25798 | __author__ = "Nicolas Delplanque"
__credits__ = ["Nicolas Delplanque"]
__version__ = "1.0.1"
__maintainer__ = "Nicolas Delplanque"
__email__ = "[email protected]"
from SPJRUD.SPJRUD import SPJRUD
from Representation.Relation import Relation
from Representation.Attribute import Attribute
from SPJRUD.Validation import *
class Project(SPJRUD):
def __init__(self, listOfParameters, subExpressionRight):
"""
        Constructor for the Project operator
        - listOfParameters = a list of attribute names (strings) to project onto
        - subExpressionRight = a Relation or an SPJRUD operator
        >> Project(['Param1', 'Param2', ...], Relation)
"""
if isinstance(subExpressionRight, Relation):
rel = subExpressionRight
self.SPJRUD = False
elif isinstance(subExpressionRight, SPJRUD):
rel = subExpressionRight.get_NewRelation()
self.SPJRUD = True
else:
raise Exception("SPJRUD -> Project : Le second parametre doit etre du type \'Relation\' ou etre un operateur SPJRUD")
valid_Project(listOfParameters, rel)
self.listOfParameters = listOfParameters
self.relation = rel
self.set_NewRelation()
self.set_SQL()
def __str__(self):
"""
        Returns the operator as a string representation
"""
if not self.SPJRUD:
return "Project(['" + "', '".join(self.listOfParameters) + "'], Relation('" + self.relation.__str__() + "'))"
if self.SPJRUD:
return "Project(['" + "', '".join(self.listOfParameters) + "'], " + self.relation.__str__() + ")"
def set_NewRelation(self):
"""
        Creates a new relation after applying the Project operator
"""
newAttributes = []
for elem in self.listOfParameters:
for att in self.relation.get_Attributes():
if elem == att.get_Name():
newAttributes.append(att)
self.newRelation = Relation(self.relation.get_Name(), newAttributes, SPJRUD=self.__str__())
def get_NewRelation(self):
"""
        Returns the relation after the applied modifications
"""
return self.newRelation
def set_SQL(self):
"""
        Stores the SQL query in the relation
"""
        # tuples that have no duplicates
sql1 = "SELECT " + ",".join(self.listOfParameters) + " FROM (" + self.relation.get_SQL() + ") GROUP BY " + ",".join(self.listOfParameters) + " HAVING COUNT(*) = 1"
        # tuples that have one or more duplicates
sql2 = "SELECT " + ",".join(self.listOfParameters) + " FROM (" + self.relation.get_SQL() + ") GROUP BY " + ",".join(self.listOfParameters) + " HAVING COUNT(*) > 1"
        # union of the two queries
sql = sql1 + " UNION " + sql2
self.newRelation.set_SQL(sql)
def get_SQL(self):
"""
        Returns the SQL query of the new relation
"""
        return self.newRelation.get_SQL()
|
the-stack_106_25799 | """Nutanix Integration for Cortex XSOAR - Unit Tests file"""
import io
import json
from datetime import datetime
from typing import *
import pytest
from CommonServerPython import DemistoException, CommandResults
from NutanixHypervisor import Client
from NutanixHypervisor import USECS_ENTRIES_MAPPING
from NutanixHypervisor import nutanix_hypervisor_hosts_list_command, \
nutanix_hypervisor_vms_list_command, nutanix_hypervisor_vm_power_status_change_command, \
nutanix_hypervisor_task_results_get_command, nutanix_hpyervisor_alerts_list_command, \
nutanix_hypervisor_alert_acknowledge_command, nutanix_hypervisor_alert_resolve_command, \
nutanix_hypervisor_alerts_acknowledge_by_filter_command, \
nutanix_hypervisor_alerts_resolve_by_filter_command, get_alert_status_filter, \
get_optional_boolean_arg, convert_epoch_time_to_datetime, \
get_optional_time_parameter_as_epoch, add_iso_entries_to_dict, \
get_human_readable_headers, task_exists
MOCKED_BASE_URL = 'https://prefix:11111/PrismGateway/services/rest/v2.0'
client = Client(base_url=MOCKED_BASE_URL, verify=False, proxy=False, auth=('fake_username', 'fake_password'))
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
command_tests_data = util_load_json('test_data/test_command_data.json')
@pytest.mark.parametrize('args, argument_name, expected',
[({'resolved': 'true'}, 'resolved', True),
({'resolved': 'false'}, 'resolved', False),
({}, 'resolved', None),
])
def test_get_optional_boolean_arg_valid(args, argument_name, expected):
"""
Given:
- Demisto arguments.
- Argument name to extract from Demisto arguments as boolean.
When:
- Case a: Argument exists, and is true.
- Case b: Argument exists, and is false.
        - Case c: Argument does not exist.
Then:
- Case a: Ensure that True is returned.
- Case b: Ensure that False is returned.
- Case c: Ensure that None is returned.
"""
assert (get_optional_boolean_arg(args, argument_name)) == expected
@pytest.mark.parametrize('args, argument_name, expected_error_message',
[({'resolved': 'unknown_boolean_value'}, 'resolved',
'Argument does not contain a valid boolean-like value'),
({'resolved': 123}, 'resolved',
'Argument is neither a string nor a boolean'),
])
def test_get_optional_boolean_arg_invalid_argument(args, argument_name, expected_error_message):
"""
Given:
- Demisto arguments.
- Argument name to extract from Demisto arguments as boolean.
When:
- Case a: Argument is a non boolean string.
- Case b: Argument is a number.
Then:
- Case a: Ensure that DemistoException is thrown with error message which indicates that string cannot be
parsed to boolean.
- Case b: Ensure that DemistoException is thrown with error message which indicates that type of the argument
is not bool or string that can be parsed.
"""
with pytest.raises(ValueError, match=expected_error_message):
get_optional_boolean_arg(args, argument_name)
@pytest.mark.parametrize('arg, expected',
[('2020-11-22T16:31:14', 1606062674000000),
(None, None),
])
def test_get_optional_time_parameter_valid_time_argument(arg, expected):
"""
Given:
- Demisto arguments.
- Argument of type time to extract from Demisto arguments as epoch time.
When:
- Case a: Argument exists, and has the expected date format.
- Case b: Argument does not exist.
Then:
- Case a: Ensure that the corresponding epoch time is returned.
- Case b: Ensure that None is returned.
"""
assert (get_optional_time_parameter_as_epoch(arg)) == expected
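# Sanity check for the expected value above: 2020-11-22T16:31:14 (UTC) is
# 1606062674 seconds after the Unix epoch, i.e. 1606062674000000 microseconds.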
@pytest.mark.parametrize('command_function, args, url_suffix, response, expected',
[(nutanix_hypervisor_hosts_list_command,
command_tests_data['nutanix-hypervisor-hosts-list']['args'],
command_tests_data['nutanix-hypervisor-hosts-list']['suffix'],
command_tests_data['nutanix-hypervisor-hosts-list']['response'],
command_tests_data['nutanix-hypervisor-hosts-list']['expected']),
(nutanix_hypervisor_vms_list_command,
command_tests_data['nutanix-hypervisor-vms-list']['args'],
command_tests_data['nutanix-hypervisor-vms-list']['suffix'],
command_tests_data['nutanix-hypervisor-vms-list']['response'],
command_tests_data['nutanix-hypervisor-vms-list']['expected']),
(nutanix_hpyervisor_alerts_list_command,
command_tests_data['nutanix-hypervisor-alerts-list']['args'],
command_tests_data['nutanix-hypervisor-alerts-list']['suffix'],
command_tests_data['nutanix-hypervisor-alerts-list']['response'],
command_tests_data['nutanix-hypervisor-alerts-list']['expected'])
])
def test_commands_get_methods(requests_mock, command_function: Callable[[Client, Dict], CommandResults], args: Dict,
url_suffix: str, response: Dict, expected: Dict):
"""
Given:
- command function.
- Demisto arguments.
- url suffix of the Nutanix service endpoint that the command function will use (needed to mock the request).
- response returned from Nutanix.
- expected CommandResults object to be returned from the command function.
When:
- Executing a command
Then:
- Ensure that the expected CommandResults object is returned by the command function.
"""
requests_mock.get(
f'{MOCKED_BASE_URL}/{url_suffix}',
json=response
)
expected_command_results = CommandResults(
outputs_prefix=expected.get('outputs_prefix'),
outputs_key_field=expected.get('outputs_key_field'),
outputs=expected.get('outputs')
)
returned_command_results = command_function(client, args)
assert returned_command_results.outputs_prefix == expected_command_results.outputs_prefix
assert returned_command_results.outputs_key_field == expected_command_results.outputs_key_field
assert returned_command_results.outputs == expected_command_results.outputs
@pytest.mark.parametrize('command_function, args, url_suffix, response, expected',
[(nutanix_hypervisor_vm_power_status_change_command,
command_tests_data['nutanix-hypervisor-vm-powerstatus-change']['args'],
command_tests_data['nutanix-hypervisor-vm-powerstatus-change']['suffix'],
command_tests_data['nutanix-hypervisor-vm-powerstatus-change']['response'],
command_tests_data['nutanix-hypervisor-vm-powerstatus-change']['expected']),
(nutanix_hypervisor_task_results_get_command,
command_tests_data['nutanix-hypervisor-task-results-get']['args'],
command_tests_data['nutanix-hypervisor-task-results-get']['suffix'],
command_tests_data['nutanix-hypervisor-task-results-get']['response'],
command_tests_data['nutanix-hypervisor-task-results-get']['expected']),
(nutanix_hypervisor_alert_acknowledge_command,
command_tests_data['nutanix-hypervisor-alert-acknowledge']['args'],
command_tests_data['nutanix-hypervisor-alert-acknowledge']['suffix'],
command_tests_data['nutanix-hypervisor-alert-acknowledge']['response'],
command_tests_data['nutanix-hypervisor-alert-acknowledge']['expected']),
(nutanix_hypervisor_alert_resolve_command,
command_tests_data['nutanix-hypervisor-alert-resolve']['args'],
command_tests_data['nutanix-hypervisor-alert-resolve']['suffix'],
command_tests_data['nutanix-hypervisor-alert-resolve']['response'],
command_tests_data['nutanix-hypervisor-alert-resolve']['expected']),
(nutanix_hypervisor_alerts_acknowledge_by_filter_command,
command_tests_data['nutanix-hypervisor-alerts-acknowledge-by-filter']['args'],
command_tests_data['nutanix-hypervisor-alerts-acknowledge-by-filter']['suffix'],
command_tests_data['nutanix-hypervisor-alerts-acknowledge-by-filter']['response'],
command_tests_data['nutanix-hypervisor-alerts-acknowledge-by-filter']['expected']),
(nutanix_hypervisor_alerts_resolve_by_filter_command,
command_tests_data['nutanix-hypervisor-alerts-resolve-by-filter']['args'],
command_tests_data['nutanix-hypervisor-alerts-resolve-by-filter']['suffix'],
command_tests_data['nutanix-hypervisor-alerts-resolve-by-filter']['response'],
command_tests_data['nutanix-hypervisor-alerts-resolve-by-filter']['expected']),
])
def test_commands_post_methods(requests_mock, command_function: Callable[[Client, Dict], CommandResults], args: Dict,
url_suffix: str, response: Dict, expected: Dict):
"""
Given:
- command function.
- Demisto arguments.
- url suffix of the Nutanix service endpoint that the command function will use (needed to mock the request).
- response returned from Nutanix.
- expected CommandResults object to be returned from the command function.
When:
- Executing a command
Then:
- Ensure that the expected CommandResults object is returned by the command function.
"""
requests_mock.post(
f'{MOCKED_BASE_URL}/{url_suffix}',
json=response
)
expected_command_results = CommandResults(
outputs_prefix=expected.get('outputs_prefix'),
outputs_key_field=expected.get('outputs_key_field'),
outputs=expected.get('outputs')
)
returned_command_results = command_function(client, args)
assert returned_command_results.outputs_prefix == expected_command_results.outputs_prefix
assert returned_command_results.outputs_key_field == expected_command_results.outputs_key_field
assert returned_command_results.outputs == expected_command_results.outputs
def test_fetch_incidents(requests_mock):
"""
Given:
- Demisto parameters.
- Demisto arguments.
- Last run of fetch-incidents
When:
        - Fetching incidents, not the first run; the last-run fetch time is before both alerts.
Then:
Ensure that alerts are returned as incidents.
Ensure that last run is set with latest alert time stamp.
"""
last_run = {'last_fetch_epoch_time': 1610360118147914}
requests_mock.get(
f'{MOCKED_BASE_URL}/alerts?start_time_in_usecs=1610360118147914',
json=command_tests_data['nutanix-fetch-incidents']['response']
)
current_time = int(datetime.utcnow().timestamp() * 1000000)
incidents, next_run = client.fetch_incidents(
params={},
last_run=last_run
)
incidents_raw_json = [json.loads(incident['rawJSON']) for incident in incidents]
assert next_run.get('last_fetch_epoch_time') >= current_time
assert incidents_raw_json == command_tests_data['nutanix-fetch-incidents']['expected']['outputs']
@pytest.mark.parametrize('true_value, false_value, alert_status_filters, expected',
[('Resolved', 'Unresolved', ['Resolved', 'Acknowledged'], True),
('Resolved', 'Unresolved', ['Unresolved', 'Acknowledged'], False),
('Resolved', 'Unresolved', ['Acknowledged'], None),
('Resolved', 'Unresolved', None, None)
])
def test_get_alert_status_filter_valid_cases(true_value, false_value, alert_status_filters, expected):
"""
Given:
- The argument name which corresponds for True value inside 'alert_status_filters' list.
- The argument name which corresponds for False value inside 'alert_status_filters' list.
     - Alert status filters, containing all the filter selections made by the user.
When:
- Case a: User selected argument that corresponds for True value.
- Case b: User selected argument that corresponds for False value.
- Case c: User did not select argument that corresponds to true or false value.
Then:
- Case a: Ensure True is returned.
- Case b: Ensure False is returned.
- Case c: Ensure None is returned.
"""
assert get_alert_status_filter(true_value, false_value, alert_status_filters) == expected
@pytest.mark.parametrize('true_value, false_value, alert_status_filters',
[('Resolved', 'Unresolved', ['Resolved', 'Unresolved']),
('Acknowledged', 'Unacknowledged', ['Acknowledged', 'Unacknowledged']),
('Auto Resolved', 'Not Auto Resolved', ['Auto Resolved', 'Not Auto Resolved'])
])
def test_get_alert_status_filter_invalid_case(true_value, false_value, alert_status_filters):
"""
Given:
- The argument name which corresponds for True value inside 'alert_status_filters' list.
- The argument name which corresponds for False value inside 'alert_status_filters' list.
     - Alert status filters, containing all the filter selections made by the user.
When:
- Case a: User selected argument that corresponds for both True and False values.
- Case b: User selected argument that corresponds for both True and False values.
- Case c: User selected argument that corresponds for both True and False values.
Then:
- Case a: Ensure DemistoException is thrown with the expected message error.
- Case b: Ensure DemistoException is thrown with the expected message error.
- Case c: Ensure DemistoException is thrown with the expected message error.
"""
with pytest.raises(DemistoException,
match=f'Invalid alert status filters configurations, only one of {true_value},{false_value} '
'can be chosen.'):
get_alert_status_filter(true_value, false_value, alert_status_filters)
@pytest.mark.parametrize('epoch_time, expected',
[(0, None),
(None, None),
(1600000000000000, '2020-09-13T12:26:40+00:00')
])
def test_convert_epoch_time_to_datetime_valid_cases(epoch_time, expected):
"""
Given:
- Time to be converted to date time in UTC timezone.
When:
- Case a: Epoch time is 0.
- Case b: Epoch time is not given.
- Case c: Valid epoch time is given.
Then:
- Case a: Ensure None is returned.
- Case b: Ensure None is returned.
- Case c: Ensure the corresponding date time string is returned.
"""
assert convert_epoch_time_to_datetime(epoch_time) == expected
def test_add_iso_entries_to_dict():
"""
Given:
- Dict containing entries with epoch time.
When:
- Adding to entries with epoch time entries with iso time.
Then:
- All 'usecs' keys in the dict are replaced with 'iso time' entries with correct iso values.
"""
tested_dict = {usec_entry: 1600000000000000 for usec_entry in USECS_ENTRIES_MAPPING.keys()}
tested_dict['host_name'] = 'Nutanix Host'
add_iso_entries_to_dict([tested_dict])
assert tested_dict['host_name'] == 'Nutanix Host'
assert all(
tested_dict.get(iso_entry) == '2020-09-13T12:26:40+00:00' for iso_entry in USECS_ENTRIES_MAPPING.values())
assert len(tested_dict) == (1 + (len(USECS_ENTRIES_MAPPING) * 2))
@pytest.mark.parametrize('outputs, expected_outputs',
[([{1: 2, 3: 4, 'a': 'b'}], [1, 3, 'a']),
([{'a': {2: 3}}], []),
([{1: 2, 3: 4, 'a': {1: 2}}, {1: 2, 'abc': 'def', 'lst': [1, {2: 3}, 3, [4, 5, 6]]}], [1]),
([{'a': [[[[[[{1: 2}]]]]]]}], []),
([], [])
])
def test_get_human_readable_headers(outputs, expected_outputs):
"""
Given:
- List of outputs.
When:
- Creating human readable keys by given outputs
Then:
- All keys that don't contains inner dicts are returned.
"""
readable_headers = get_human_readable_headers(outputs)
assert all(readable_header in expected_outputs for readable_header in readable_headers)
assert len(readable_headers) == len(expected_outputs)
def test_task_id_exists_task_exists(requests_mock):
"""
Given:
- Task Id.
- Nutanix client.
When:
Task to be polled exists in Nutanix.
Then:
True is returned
"""
task_id = 'abcd1234-ab12-cd34-1a2s3d5f7hh4'
requests_mock.get(
f'{MOCKED_BASE_URL}/tasks/{task_id}',
json={}
)
assert task_exists(client, task_id)
def test_task_id_exists_task_does_not_exist(requests_mock):
"""
Given:
- Task Id.
- Nutanix client.
When:
Task to be polled does not exist in Nutanix.
Then:
False is returned
"""
task_id = 'abcd1234-ab12-cd34-1a2s3d5f7hh4'
requests_mock.get(
f'{MOCKED_BASE_URL}/tasks/{task_id}',
exc=DemistoException(f'Task with id {task_id} is not found')
)
assert not task_exists(client, task_id)
def test_task_id_exists_unexpected_exception(requests_mock):
"""
Given:
- Task Id.
- Nutanix client.
When:
Unexpected exception is thrown during call to Nutanix service.
Then:
The unexpected exception is raised and not passed silently
"""
task_id = 'abcd1234-ab12-cd34-1a2s3d5f7hh4'
requests_mock.get(
f'{MOCKED_BASE_URL}/tasks/{task_id}',
exc=DemistoException('Unexpected exception')
)
with pytest.raises(DemistoException, match='Unexpected exception'):
task_exists(client, task_id)
|
the-stack_106_25800 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.static import Program, program_guard
class TestScaleOp(OpTest):
def setUp(self):
self.op_type = "scale"
self.python_api = paddle.scale
self.dtype = np.float64
self.init_dtype_type()
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
self.attrs = {'scale': -2.3}
self.outputs = {
'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
}
def init_dtype_type(self):
pass
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
class TestScaleOpScaleVariable(OpTest):
def setUp(self):
self.op_type = "scale"
self.python_api = paddle.scale
self.dtype = np.float64
self.init_dtype_type()
self.scale = -2.3
self.inputs = {
'X': np.random.random((10, 10)).astype(self.dtype),
'ScaleTensor': np.array([self.scale]).astype('float64')
}
self.attrs = {}
self.outputs = {'Out': self.inputs['X'] * self.dtype(self.scale)}
def init_dtype_type(self):
pass
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
class TestScaleOpSelectedRows(unittest.TestCase):
def init_dtype_type(self):
pass
def check_with_place(self, place, in_name, out_name):
scope = core.Scope()
self.dtype = np.float64
self.init_dtype_type()
# create and initialize Grad Variable
in_height = 10
in_rows = [0, 4, 7]
in_row_numel = 12
scale = 2.0
in_selected_rows = scope.var(in_name).get_selected_rows()
in_selected_rows.set_height(in_height)
in_selected_rows.set_rows(in_rows)
in_array = np.random.random(
(len(in_rows), in_row_numel)).astype(self.dtype)
in_tensor = in_selected_rows.get_tensor()
in_tensor.set(in_array, place)
# create and initialize Param Variable
out_selected_rows = scope.var(out_name).get_selected_rows()
out_tensor = out_selected_rows.get_tensor()
out_tensor._set_dims(in_tensor._get_dims())
# create and run sgd operator
scale_op = Operator("scale", X=in_name, Out=out_name, scale=scale)
scale_op.run(scope, place)
# get and compare result
out_height = out_selected_rows.height()
out_rows = out_selected_rows.rows()
result_array = np.array(out_tensor)
assert (in_array * scale == result_array).all()
assert in_height == out_height
assert in_rows == out_rows
def test_scale_selected_rows(self):
places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(core.CUDAPlace(0))
for place in places:
self.check_with_place(place, 'in', 'out')
def test_scale_selected_rows_inplace(self):
places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(core.CUDAPlace(0))
for place in places:
self.check_with_place(place, 'in', 'in')
class TestScaleRaiseError(unittest.TestCase):
def test_errors(self):
def test_type():
fluid.layers.scale([10])
self.assertRaises(TypeError, test_type)
# Add FP16 test
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestScaleFp16Op(TestScaleOp):
def init_dtype_type(self):
self.dtype = np.float16
def test_check_output(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=0.002, check_eager=True)
def test_check_grad(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_grad_with_place(
place, ["X"], "Out", max_relative_error=0.05, check_eager=True)
class TestScaleBF16Op(OpTest):
def setUp(self):
self.op_type = "scale"
self.python_api = paddle.scale
self.dtype = np.uint16
self.attrs = {'scale': -2.3}
x = np.random.random((10, 10)).astype(np.float32)
out = x * np.float32(self.attrs['scale'])
self.inputs = {'X': convert_float_to_uint16(x)}
self.outputs = {'Out': convert_float_to_uint16(out)}
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=0.8, check_eager=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows):
def init_dtype_type(self):
self.dtype = np.float16
def test_scale_selected_rows(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_with_place(place, 'in', 'out')
def test_scale_selected_rows_inplace(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_with_place(place, 'in', 'in')
class TestScaleApiStatic(unittest.TestCase):
def _executed_api(self, x, scale=1.0, bias=0.0):
return paddle.scale(x, scale, bias)
def test_api(self):
paddle.enable_static()
input = np.random.random([2, 25]).astype("float32")
main_prog = Program()
with program_guard(main_prog, Program()):
x = paddle.static.data(name="x", shape=[2, 25], dtype="float32")
out = self._executed_api(x, scale=2.0, bias=3.0)
exe = paddle.static.Executor(place=paddle.CPUPlace())
out = exe.run(main_prog, feed={"x": input}, fetch_list=[out])
self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True)
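# The "Inplace" variants below only override _executed_api, so the assertions in the
# parent test classes cover both paddle.scale and the in-place Tensor.scale_ API.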
class TestScaleInplaceApiStatic(TestScaleApiStatic):
def _executed_api(self, x, scale=1.0, bias=0.0):
return x.scale_(scale, bias)
class TestScaleApiDygraph(unittest.TestCase):
def _executed_api(self, x, scale=1.0, bias=0.0):
return paddle.scale(x, scale, bias)
def test_api(self):
paddle.disable_static()
input = np.random.random([2, 25]).astype("float32")
x = paddle.to_tensor(input)
out = self._executed_api(x, scale=2.0, bias=3.0)
self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True)
paddle.enable_static()
class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
def _executed_api(self, x, scale=1.0, bias=0.0):
return x.scale_(scale, bias)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_25802 | # encoding=utf-8
import logging
from typing import Any
from hamcrest.core.core.isanything import IsAnything
from hamcrest.core.description import Description
from hamcrest.core.matcher import Matcher
logger = logging.getLogger(__name__)
def append_matcher_description(
field_matcher: Matcher[Any], field_name: str, description: Description
) -> None:
if not isinstance(field_matcher, IsAnything):
description.append_text(f" {field_name}: ").append_description_of(field_matcher)
def describe_field_mismatch(
field_matcher: Matcher[Any],
field_name: str,
actual_value: Any,
mismatch_description: Description,
) -> None:
if not isinstance(field_matcher, IsAnything) and not field_matcher.matches(actual_value):
mismatch_description.append_text(f" {field_name}: ")
field_matcher.describe_mismatch(actual_value, mismatch_description)
def describe_field_match(
field_matcher: Matcher[Any],
field_name: str,
actual_value: Any,
match_description: Description,
) -> None:
if not isinstance(field_matcher, IsAnything) and field_matcher.matches(actual_value):
match_description.append_text(f" {field_name}: ")
field_matcher.describe_match(actual_value, match_description)
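# Minimal usage sketch (hypothetical matcher and field names): inside a custom
# matcher these helpers would typically be called from describe_to / describe_mismatch,
# e.g.
#   def describe_to(self, description):
#       description.append_text("response with")
#       append_matcher_description(self.status_matcher, "status", description)
#   def describe_mismatch(self, item, mismatch_description):
#       describe_field_mismatch(self.status_matcher, "status", item.status, mismatch_description)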
|
the-stack_106_25803 |
import auxiliary.process as process
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import keywords
import random
import json
# mode = 'any'
# mode = "all"
sns.set(style="whitegrid", rc={'figure.figsize':(20,10)})
random.seed(1)
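# selectHost summarises every living host in a single timestep: total mitochondrial
# DNA, unmutated DNA (weighted by copy number), fission/fusion rates and rep.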
def selectHost(timestep, time):
timestep = json.loads(timestep)
out = []
# print(len(timestep.items()))
for hostId, host in timestep.items():
host_ndna = 0
host_unmut = 0
for mitoId, mito in host['subcells'].items():
host_ndna += mito['n DNA']
if mito['n DNA'] > 0:
host_unmut += mito['unmut'] *mito['n DNA']
out.append({"host":hostId, "parent":host["parent"], "time":time, "n DNA":host_ndna, "unmut":host_unmut, "fission rate":host['fission_rate'],"fusion_rate":host['fusion_rate'], "rep":host['rep']})
return [out]
def gethostdct(line, host):
return list(filter(lambda living: living['host'] == host, line))[0]
dfs = process.get(picklefname="hosts4000.pickle",force=False, stop=4000, folder=keywords.getfoldername(), reverse=True, selector=selectHost, verbose=True, sortbykeywordix=keywords.getkeywordix())
alldf = []
for path in dfs:
pre_df = []
params = {}
for key, val in dfs[path].items():
if key != 'data':
params[key]=float(val)
host = None
parent =None
# print(dfs[path]['data'])
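    # Trace a single lineage: follow a randomly chosen host and, whenever that host
    # is no longer present in a timestep, switch to its parent; stop once the root
    # ancestor (parent id "-1") is reached.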
for line in dfs[path]['data']:
# print(line)
if type(line) is dict:
line = [line]
if host == None:
current = random.choice(line)
host = current['host']
parent = current['parent']
# print(host, "newly chosen")
if host not in [dct['host'] for dct in line]:
# print(line)
host = str(parent)
parent = gethostdct(line, host)['parent']
if host == "-1" :
break
current = gethostdct(line, host)
# print(current)
current.update(params)
pre_df.append(current)
df = pd.DataFrame(pre_df)
# df = df.sort_values(by='time')
# print(df)
fig, ax = plt.subplots()
# df['unmut'] /= df['n DNA']
# g = sns.lineplot(data=df, x='time', y='unmut', palette="viridis", hue='n DNA', legend=None, ax=ax, sort=False, estimator=None)
g = sns.scatterplot(data=df, x='time', y='unmut', palette="viridis", hue='n DNA', legend=None, ax=ax, s=10, edgecolor=None)
title = "Unmutated DNA through last 4000 timesteps in single lineage\n"
norm = plt.Normalize(df['n DNA'].min(), df['n DNA'].max())
sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
sm.set_array([])
ax.figure.colorbar(sm)
fig.tight_layout()
plt.savefig("current_processing/perrun/integer_unmut/last 4000 timesteps of "+ path[-4:] +".png", dpi=200)
plt.close()
# print("just trying one currently!")
# break
df['path'] = path
df['time'] = pd.Series(range(0,df.shape[0]))
alldf.append(df)
alldf= pd.concat(alldf)
print(alldf)
g = sns.lineplot(data=alldf, x='time', y='unmut', palette="colorblind", hue='path',lw=1, units="path", estimator=None, legend=None)
plt.savefig("current_processing/allancestortrace_unmut.png", dpi=600)
|
the-stack_106_25806 | #-*- coding: utf-8 -*-
import os
from django.test import TestCase
from django.core.urlresolvers import reverse
import django.core.files
from django.contrib.admin import helpers
from django.contrib import admin
from django.contrib.auth.models import User
from django.conf import settings
from filer.models.filemodels import File
from filer.models.foldermodels import Folder, FolderPermission
from filer.models.imagemodels import Image
from filer.models.clipboardmodels import Clipboard
from filer.models.virtualitems import FolderRoot
from filer.models import tools
from filer.admin.folderadmin import FolderAdmin
from filer.tests.helpers import (create_superuser, create_folder_structure,
create_image, SettingsOverride)
from filer import settings as filer_settings
class FilerFolderAdminUrlsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.client.logout()
def test_filer_app_index_get(self):
response = self.client.get(reverse('admin:app_list', args=('filer',)))
self.assertEqual(response.status_code, 200)
def test_filer_make_root_folder_get(self):
response = self.client.get(reverse('admin:filer-directory_listing-make_root_folder')+"?_popup=1")
self.assertEqual(response.status_code, 200)
def test_filer_make_root_folder_post(self):
FOLDER_NAME = "root folder 1"
self.assertEqual(Folder.objects.count(), 0)
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'),
{
"name":FOLDER_NAME,
})
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.all()[0].name, FOLDER_NAME)
#TODO: not sure why the status code is 200
self.assertEqual(response.status_code, 200)
def test_filer_remember_last_opened_directory(self):
folder = Folder.objects.create(name='remember me please')
get_last_folder = lambda: self.client.get(reverse('admin:filer-directory_listing-last'), follow=True)
self.client.get(reverse('admin:filer-directory_listing', kwargs={'folder_id': folder.id}))
self.assertEqual(int(self.client.session['filer_last_folder_id']), folder.id)
self.assertEqual(get_last_folder().context['folder'], folder)
# let's test fallback
folder.delete()
self.assertTrue(isinstance(get_last_folder().context['folder'], FolderRoot))
def test_filer_directory_listing_root_empty_get(self):
response = self.client.post(reverse('admin:filer-directory_listing-root'))
self.assertEqual(response.status_code, 200)
def test_filer_directory_listing_root_get(self):
create_folder_structure(depth=3, sibling=2, parent=None)
response = self.client.post(reverse('admin:filer-directory_listing-root'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['folder'].children.count(), 6)
def test_validate_no_duplcate_folders(self):
FOLDER_NAME = "root folder 1"
self.assertEqual(Folder.objects.count(), 0)
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'), {
"name":FOLDER_NAME,
"_popup": 1
})
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.all()[0].name, FOLDER_NAME)
# and create another one
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'),
{"name":FOLDER_NAME, "_popup": 1})
# second folder didn't get created
self.assertEqual(Folder.objects.count(), 1)
self.assertContains(response, 'Folder with this name already exists')
def test_validate_no_duplcate_folders_on_rename(self):
self.assertEqual(Folder.objects.count(), 0)
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'), {
"name": "foo",
"_popup": 1})
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.all()[0].name, "foo")
# and create another one
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'), {
"name": "bar",
"_popup": 1})
self.assertEqual(Folder.objects.count(), 2)
bar = Folder.objects.get(name="bar")
response = self.client.post("/admin/filer/folder/%d/" % bar.pk, {
"name": "foo",
"_popup": 1})
self.assertContains(response, 'Folder with this name already exists')
# refresh from db and validate that it's name didn't change
bar = Folder.objects.get(pk=bar.pk)
self.assertEqual(bar.name, "bar")
def test_change_folder_owner_keep_name(self):
folder = Folder.objects.create(name='foobar')
another_superuser = User.objects.create_superuser(
'gigi', '[email protected]', 'secret')
response = self.client.post('/admin/filer/folder/%d/' % folder.pk, {
'owner': another_superuser.pk,
'name': 'foobar',
'_continue': 'Save and continue editing'})
        # successful POST returns a redirect
self.assertEqual(response.status_code, 302)
folder = Folder.objects.get(pk=folder.pk)
self.assertEqual(folder.owner.pk, another_superuser.pk)
class FilerImageAdminUrlsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.client.logout()
class FilerClipboardAdminUrlsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
self.img = create_image()
self.image_name = 'test_file.jpg'
self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, self.image_name)
self.img.save(self.filename, 'JPEG')
def tearDown(self):
self.client.logout()
os.remove(self.filename)
for img in Image.objects.all():
img.delete()
def test_filer_upload_file(self, extra_headers={}):
self.assertEqual(Image.objects.count(), 0)
file_obj = django.core.files.File(open(self.filename, 'rb'))
response = self.client.post(
reverse('admin:filer-ajax_upload'),
{'Filename': self.image_name, 'Filedata': file_obj, 'jsessionid': self.client.session.session_key,},
**extra_headers
)
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(Image.objects.all()[0].original_filename, self.image_name)
def test_filer_ajax_upload_file(self):
self.assertEqual(Image.objects.count(), 0)
file_obj = django.core.files.File(open(self.filename, 'rb'))
response = self.client.post(
reverse('admin:filer-ajax_upload')+'?filename=%s' % self.image_name,
data=file_obj.read(),
content_type='application/octet-stream',
**{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
)
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(Image.objects.all()[0].original_filename, self.image_name)
class BulkOperationsMixin(object):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
self.img = create_image()
self.image_name = 'test_file.jpg'
self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR,
self.image_name)
self.img.save(self.filename, 'JPEG')
self.create_src_and_dst_folders()
self.folder = Folder.objects.create(name="root folder", parent=None)
self.sub_folder1 = Folder.objects.create(name="sub folder 1", parent=self.folder)
self.sub_folder2 = Folder.objects.create(name="sub folder 2", parent=self.folder)
self.image_obj = self.create_image(self.src_folder)
self.create_file(self.folder)
self.create_file(self.folder)
self.create_image(self.folder)
self.create_image(self.sub_folder1)
self.create_file(self.sub_folder1)
self.create_file(self.sub_folder1)
self.create_image(self.sub_folder2)
self.create_image(self.sub_folder2)
def tearDown(self):
self.client.logout()
os.remove(self.filename)
for f in File.objects.all():
f.delete()
for folder in Folder.objects.all():
folder.delete()
def create_src_and_dst_folders(self):
self.src_folder = Folder(name="Src", parent=None)
self.src_folder.save()
self.dst_folder = Folder(name="Dst", parent=None)
self.dst_folder.save()
def create_image(self, folder, filename=None):
filename = filename or 'test_image.jpg'
file_obj = django.core.files.File(open(self.filename, 'rb'), name=filename)
image_obj = Image.objects.create(owner=self.superuser, original_filename=self.image_name, file=file_obj, folder=folder)
image_obj.save()
return image_obj
def create_file(self, folder, filename=None):
filename = filename or 'test_file.dat'
file_data = django.core.files.base.ContentFile('some data')
file_data.name = filename
file_obj = File.objects.create(owner=self.superuser, original_filename=filename, file=file_data, folder=folder)
file_obj.save()
return file_obj
class FilerBulkOperationsTests(BulkOperationsMixin, TestCase):
def test_move_files_and_folders_action(self):
# TODO: Test recursive (files and folders tree) move
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'move_files_and_folders',
'post': 'yes',
'destination': self.dst_folder.id,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 0)
self.assertEqual(self.dst_folder.files.count(), 1)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.dst_folder.id,
})
response = self.client.post(url, {
'action': 'move_files_and_folders',
'post': 'yes',
'destination': self.src_folder.id,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
def test_validate_no_duplicate_folders_on_move(self):
"""Create the following folder hierarchy:
root
|
|--foo
| |-bar
|
|--bar
        and try to move the outer bar into foo. This has to fail since it would result
in two folders with the same name and parent.
"""
root = Folder.objects.create(name='root', owner=self.superuser)
foo = Folder.objects.create(name='foo', parent=root, owner=self.superuser)
bar = Folder.objects.create(name='bar', parent=root, owner=self.superuser)
foos_bar = Folder.objects.create(name='bar', parent=foo, owner=self.superuser)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': root.pk,
})
response = self.client.post(url, {
'action': 'move_files_and_folders',
'post': 'yes',
'destination': foo.pk,
helpers.ACTION_CHECKBOX_NAME: 'folder-%d' % (bar.pk,),
})
# refresh from db and validate that it hasn't been moved
bar = Folder.objects.get(pk=bar.pk)
self.assertEqual(bar.parent.pk, root.pk)
def test_move_to_clipboard_action(self):
# TODO: Test recursive (files and folders tree) move
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'move_to_clipboard',
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 0)
self.assertEqual(self.dst_folder.files.count(), 0)
clipboard = Clipboard.objects.get(user=self.superuser)
self.assertEqual(clipboard.files.count(), 1)
tools.move_files_from_clipboard_to_folder(clipboard, self.src_folder)
tools.discard_clipboard(clipboard)
self.assertEqual(clipboard.files.count(), 0)
self.assertEqual(self.src_folder.files.count(), 1)
def test_files_set_public_action(self):
self.image_obj.is_public = False
self.image_obj.save()
self.assertEqual(self.image_obj.is_public, False)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'files_set_public',
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.image_obj = Image.objects.get(id=self.image_obj.id)
self.assertEqual(self.image_obj.is_public, True)
def test_files_set_private_action(self):
self.image_obj.is_public = True
self.image_obj.save()
self.assertEqual(self.image_obj.is_public, True)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'files_set_private',
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.image_obj = Image.objects.get(id=self.image_obj.id)
self.assertEqual(self.image_obj.is_public, False)
self.image_obj.is_public = True
self.image_obj.save()
def test_copy_files_and_folders_action(self):
# TODO: Test recursive (files and folders tree) copy
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
self.assertEqual(self.image_obj.original_filename, 'test_file.jpg')
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'copy_files_and_folders',
'post': 'yes',
'suffix': 'test',
'destination': self.dst_folder.id,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 1)
self.assertEqual(self.src_folder.files[0].id, self.image_obj.id)
dst_image_obj = self.dst_folder.files[0]
self.assertEqual(dst_image_obj.original_filename, 'test_filetest.jpg')
class FilerDeleteOperationTests(BulkOperationsMixin, TestCase):
def test_delete_files_or_folders_action(self):
self.assertNotEqual(File.objects.count(), 0)
self.assertNotEqual(Image.objects.count(), 0)
self.assertNotEqual(Folder.objects.count(), 0)
url = reverse('admin:filer-directory_listing-root')
folders = []
for folder in FolderRoot().children.all():
folders.append('folder-%d' % (folder.id,))
response = self.client.post(url, {
'action': 'delete_files_or_folders',
'post': 'yes',
helpers.ACTION_CHECKBOX_NAME: folders,
})
self.assertEqual(File.objects.count(), 0)
self.assertEqual(Folder.objects.count(), 0)
def test_delete_files_or_folders_action_with_mixed_types(self):
# add more files/images so we can test the polymorphic queryset with multiple types
self.create_file(folder=self.src_folder)
self.create_image(folder=self.src_folder)
self.create_file(folder=self.src_folder)
self.assertNotEqual(File.objects.count(), 0)
self.assertNotEqual(Image.objects.count(), 0)
url = reverse('admin:filer-directory_listing', args=(self.folder.id,))
folders = []
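        # despite the variable name, this selection mixes 'file-<pk>' and 'folder-<pk>' checkbox values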
for f in File.objects.filter(folder=self.folder):
folders.append('file-%d' % (f.id,))
folders.append('folder-%d' % self.sub_folder1.id)
response = self.client.post(url, {
'action': 'delete_files_or_folders',
'post': 'yes',
helpers.ACTION_CHECKBOX_NAME: folders,
})
self.assertEqual(File.objects.filter(folder__in=[self.folder.id, self.sub_folder1.id]).count(), 0)
class FilerResizeOperationTests(BulkOperationsMixin, TestCase):
def test_resize_images_action(self):
# TODO: Test recursive (files and folders tree) processing
self.assertEqual(self.image_obj.width, 800)
self.assertEqual(self.image_obj.height, 600)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'resize_images',
'post': 'yes',
'width': 42,
'height': 42,
'crop': True,
'upscale': False,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.image_obj = Image.objects.get(id=self.image_obj.id)
self.assertEqual(self.image_obj.width, 42)
self.assertEqual(self.image_obj.height, 42)
class PermissionAdminTest(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.client.logout()
def test_render_add_view(self):
"""
Really stupid and simple test to see if the add Permission view can be rendered
"""
response = self.client.get(reverse('admin:filer_folderpermission_add'))
self.assertEqual(response.status_code, 200)
class FolderListingTest(TestCase):
def setUp(self):
superuser = create_superuser()
self.staff_user = User.objects.create_user(
username='joe', password='x', email='[email protected]')
self.staff_user.is_staff = True
self.staff_user.save()
self.parent = Folder.objects.create(name='bar', parent=None, owner=superuser)
self.foo_folder = Folder.objects.create(name='foo', parent=self.parent, owner=self.staff_user)
self.bar_folder = Folder.objects.create(name='bar', parent=self.parent, owner=superuser)
self.baz_folder = Folder.objects.create(name='baz', parent=self.parent, owner=superuser)
file_data = django.core.files.base.ContentFile('some data')
file_data.name = 'spam'
self.spam_file = File.objects.create(
owner=superuser, original_filename='spam',
file=file_data, folder=self.parent)
self.client.login(username='joe', password='x')
def test_with_permissions_disabled(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=False):
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
            # user sees all items: FOO, BAR, BAZ, SPAM
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk, self.bar_folder.pk, self.baz_folder.pk,
self.spam_file.pk]))
def test_folder_ownership(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=True):
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees only 1 folder : FOO
# he doesn't see BAR, BAZ and SPAM because he doesn't own them
# and no permission has been given
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk]))
def test_with_permission_given_to_folder(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=True):
# give permissions over BAR
FolderPermission.objects.create(
folder=self.bar_folder,
user=self.staff_user,
type=FolderPermission.THIS,
can_edit=FolderPermission.ALLOW,
can_read=FolderPermission.ALLOW,
can_add_children=FolderPermission.ALLOW)
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees 2 folder : FOO, BAR
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk, self.bar_folder.pk]))
def test_with_permission_given_to_parent_folder(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=True):
FolderPermission.objects.create(
folder=self.parent,
user=self.staff_user,
type=FolderPermission.CHILDREN,
can_edit=FolderPermission.ALLOW,
can_read=FolderPermission.ALLOW,
can_add_children=FolderPermission.ALLOW)
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees all items because he has permissions on the parent folder
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk, self.bar_folder.pk, self.baz_folder.pk,
self.spam_file.pk]))
def test_search_against_owner(self):
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id})
response = self.client.get(url, {'q': 'joe'})
item_list = response.context['paginated_items'].object_list
self.assertEqual(len(item_list), 1)
response = self.client.get(url, {'q': 'admin'})
item_list = response.context['paginated_items'].object_list
self.assertEqual(len(item_list), 4)
def test_owner_search_fields(self):
folderadmin = FolderAdmin(Folder, admin.site)
self.assertEqual(folderadmin.owner_search_fields, ['username', 'first_name', 'last_name', 'email'])
folder_qs = folderadmin.filter_folder(Folder.objects.all(), ['[email protected]'])
self.assertEqual(len(folder_qs), 1)
class DontSearchOwnerEmailFolderAdmin(FolderAdmin):
owner_search_fields = ['username', 'first_name', 'last_name']
folderadmin = DontSearchOwnerEmailFolderAdmin(Folder, admin.site)
folder_qs = folderadmin.filter_folder(Folder.objects.all(), ['[email protected]'])
self.assertEqual(len(folder_qs), 0)
|
the-stack_106_25807 | #coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import paddle.fluid as fluid
from paddlehub.finetune import checkpoint_pb2
from paddlehub.common.logger import logger
CKPT_FILE_NAME = "ckpt.meta"
def load_checkpoint(checkpoint_dir, exe, main_program):
ckpt_meta_path = os.path.join(checkpoint_dir, CKPT_FILE_NAME)
ckpt = checkpoint_pb2.CheckPoint()
logger.info("Try loading checkpoint from {}".format(ckpt_meta_path))
if os.path.exists(ckpt_meta_path):
with open(ckpt_meta_path, "rb") as f:
ckpt.ParseFromString(f.read())
current_epoch = 1
global_step = 0
best_score = -999
def if_exist(var):
return os.path.exists(os.path.join(ckpt.latest_model_dir, var.name))
if ckpt.latest_model_dir:
fluid.io.load_vars(
exe, ckpt.latest_model_dir, main_program, predicate=if_exist)
# Compatible with older versions without best_score in checkpoint_pb2
try:
best_score = ckpt.best_score
        except Exception:
            best_score = -999
logger.info("PaddleHub model checkpoint loaded. current_epoch={}, "
"global_step={}, best_score={:.5f}".format(
ckpt.current_epoch, ckpt.global_step, best_score))
return True, ckpt.current_epoch, ckpt.global_step, best_score
logger.info("PaddleHub model checkpoint not found, start from scratch...")
return False, current_epoch, global_step, best_score
def save_checkpoint(checkpoint_dir,
current_epoch,
global_step,
best_score,
exe,
main_program=fluid.default_main_program()):
ckpt_meta_path = os.path.join(checkpoint_dir, CKPT_FILE_NAME)
ckpt = checkpoint_pb2.CheckPoint()
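    # only the checkpoint metadata is written by this helper; the caller is expected
    # to save the model variables themselves into model_saved_dir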
model_saved_dir = os.path.join(checkpoint_dir, "step_%d" % global_step)
ckpt.current_epoch = current_epoch
ckpt.global_step = global_step
ckpt.latest_model_dir = model_saved_dir
ckpt.best_score = best_score
with open(ckpt_meta_path, "wb") as f:
f.write(ckpt.SerializeToString())
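# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes
# PaddlePaddle's fluid executor API and uses the placeholder directory
# "./demo_ckpt"; on the first call no ckpt.meta exists yet, so load_checkpoint
# returns the start-from-scratch defaults, which save_checkpoint then records.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    exe = fluid.Executor(fluid.CPUPlace())
    program = fluid.default_main_program()
    ckpt_dir = "./demo_ckpt"
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    loaded, epoch, step, best = load_checkpoint(ckpt_dir, exe, program)
    # loaded is False on a fresh directory; epoch/step/best are 1, 0, -999
    save_checkpoint(ckpt_dir, current_epoch=epoch, global_step=step,
                    best_score=best, exe=exe, main_program=program)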
|
the-stack_106_25812 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : openGauss-tools-backup
Case Name   : JdbcGsBackup -m dump exports and restores only the schema (public schema), without data
Description :
    1.Create a database
    2.Create a table and a user in the specified database
    3.Change the authentication method to sha256
    4.Dump the specified database with the -o option and -s set to public
    5.Drop the table and then run the restore
    6.Check the restored table
    7.Clean up the environment
Expect      :
    1.Creation succeeds
    2.Creation succeeds
    3.Modification succeeds
    4.Dump succeeds
    5.Restore succeeds
    6.The table is restored but contains no data
    7.Environment cleanup completes
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
class ToolsBackup(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.pri_sh = CommonSH('PrimaryDbUser')
self.log.info(
'---Opengauss_Function_JdbcGsBackup_Case0026start---')
self.constant = Constant()
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.Primary_Node = Node('PrimaryDbUser')
self.Root_Node = Node('PrimaryRoot')
self.package = os.path.join(
os.path.dirname(macro.DB_INSTANCE_PATH), 'package_zh')
self.db_name = "gs_db03"
self.user = "us_03"
self.tb_name = "t_03"
        self.log.info('---Back up the pg_hba.conf file---')
cmd = f"cp {os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')} " \
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf_t_bak')}"
self.log.info(cmd)
msg = self.Primary_Node.sh(cmd).result()
self.log.info(msg)
def test_tools_backup(self):
        self.log.info('---Create the directory for the tool---')
mkdir_cmd = f'''if [ ! -d "{self.package}" ]
then
mkdir -p {self.package}
fi'''
self.log.info(mkdir_cmd)
result = self.Primary_Node.sh(mkdir_cmd).result()
self.log.info(result)
self.assertEqual(result, '')
        self.log.info('---Download the openGauss-tools-backup package---')
sql_cmd = f'''wget -P {self.package} {macro.PACKAGE_URL}; '''
self.log.info(sql_cmd)
result = self.Primary_Node.sh(sql_cmd).result()
self.log.info(result)
self.assertIn(f"‘{self.package}/openGauss-tools-backup.tar.gz’ saved"
, result)
        self.log.info('---Extract the package---')
sql_cmd = f'''cd {self.package};
tar -zxvf openGauss-tools-backup.tar.gz; '''
self.log.info(sql_cmd)
result = self.Primary_Node.sh(sql_cmd).result()
self.log.info(result)
self.assertIn('openGauss-tools-backup', result)
        self.log.info('-----Create the database----')
sql_cmd = self.pri_sh.execut_db_sql(f'''drop database if exists \
{self.db_name};
create database {self.db_name};''')
self.log.info(sql_cmd)
self.assertIn(self.constant.CREATE_DATABASE_SUCCESS, sql_cmd)
        self.log.info('---Create a test user and table in the specified database---')
sql_cmd = f'''drop table if exists {self.tb_name};
create table {self.tb_name} (id int);
insert into {self.tb_name} values(1),(2),(3);
drop user if exists {self.user};
create user {self.user} with sysadmin \
password '{macro.COMMON_PASSWD}';'''
sql_result = self.pri_sh.execut_db_sql(sql=sql_cmd,
dbname=f'{self.db_name}')
self.log.info(sql_result)
self.assertIn(self.constant.TABLE_CREATE_SUCCESS, sql_result)
self.assertIn(self.constant.CREATE_ROLE_SUCCESS_MSG, sql_result)
        self.log.info('---Change the authentication method to sha256---')
cmd = f"grep -nr '127.0.0.1/32' " \
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')}"
self.log.info(cmd)
line = self.Primary_Node.sh(
cmd).result().splitlines()[0].split(':')[0]
self.log.info(line)
cmd = f'sed -i "{str(int(line)+1)} ihost all all ' \
f'{self.Primary_Node.db_host}/32 sha256" ' \
f'{os.path.join(macro.DB_INSTANCE_PATH, "pg_hba.conf")}; ' \
f'cat {os.path.join(macro.DB_INSTANCE_PATH, "pg_hba.conf")};'
self.log.info(cmd)
result = self.Primary_Node.sh(cmd).result()
self.log.info(result)
restart_msg = self.pri_sh.restart_db_cluster()
self.log.info(restart_msg)
status = self.pri_sh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
        self.log.info('---Dump---')
sql_cmd = f'''cd {self.package}/openGauss-tools-backup;\
java -jar openGauss-tools-backup.jar \
-m dump \
-d {self.db_name} \
-h {self.Primary_Node.db_host} \
-p {self.Primary_Node.db_port} \
-U {self.user} \
-P {macro.COMMON_PASSWD} \
-o \
-s public \
-t '''
self.log.info(sql_cmd)
msg = self.Primary_Node.sh(sql_cmd).result()
self.log.info(msg)
self.assertIn(self.constant.jdbcgsbackup_success, msg)
self.assertNotIn(self.constant.jdbcgsbackup_failed[0] and
self.constant.jdbcgsbackup_failed[1] and
self.constant.jdbcgsbackup_failed[2], msg)
        self.log.info('---Drop the table and then restore---')
sql_cmd = f'''drop table if exists {self.tb_name} cascade;'''
sql_result = self.pri_sh.execut_db_sql(sql=sql_cmd,
dbname=f'{self.db_name}')
self.log.info(sql_result)
self.assertIn(self.constant.TABLE_DROP_SUCCESS, sql_result)
        self.log.info('---Restore---')
sql_cmd = f'''cd {self.package}/openGauss-tools-backup;\
java -jar openGauss-tools-backup.jar \
-m restore \
-d {self.db_name} \
-h {self.Primary_Node.db_host} \
-p {self.Primary_Node.db_port} \
-U {self.user} \
-P {macro.COMMON_PASSWD} \
-s public \
-n public'''
self.log.info(sql_cmd)
msg = self.Primary_Node.sh(sql_cmd).result()
self.log.info(msg)
self.assertIn(self.constant.jdbcgsbackup_success, msg)
self.assertNotIn(self.constant.jdbcgsbackup_failed[0] and
self.constant.jdbcgsbackup_failed[1] and
self.constant.jdbcgsbackup_failed[2], msg)
        self.log.info('---Query the restored table; it contains no data---')
sql_cmd = f'''select * from {self.tb_name};'''
sql_result = self.pri_sh.execut_db_sql(sql=sql_cmd,
dbname=f'{self.db_name}')
self.log.info(sql_result)
self.assertIn('0 rows', sql_result)
def tearDown(self):
        self.log.info('---Clean up the environment---')
sql_cmd = f'''rm -rf {self.package};'''
self.log.info(sql_cmd)
result = self.Root_Node.sh(sql_cmd).result()
self.log.info(result)
cmd = f"rm -rf " \
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')};" \
f"mv " \
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf_t_bak')} " \
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')}"
self.log.info(cmd)
self.Primary_Node.sh(cmd)
sql_cmd = self.pri_sh.execut_db_sql(f'''drop database if exists \
{self.db_name};
drop user if exists {self.user} cascade;''')
self.log.info(sql_cmd)
self.log.info(
'---Opengauss_Function_JdbcGsBackup_Case0026finish---')
|
the-stack_106_25814 | '''
Script contains helper functions to load data from MIMIC-III.
'''
import pandas as pd
import torch.utils.data as data_utils
def load_mimic3(fpath='data/sepsis3_processed_data.csv'):
data = pd.read_csv(fpath, index_col=0)
traj_ids = data['traj'].unique()
return traj_ids, data
def collate_fn(batch):
return tuple(zip(*batch))
class MIMICIIIDataset(data_utils.Dataset):
def __init__(self, traj_ids, data, sensitive_attr='gender', return_next_state=False):
self.traj_ids = traj_ids
self.data = data
self.sens_attr = sensitive_attr
self.return_next_state = return_next_state
def __len__(self):
return len(self.traj_ids)
def __getitem__(self, index):
trajectory = self.traj_ids[index]
if type(trajectory) != list:
trajectory = [trajectory]
return self.process_trajectories(trajectory)
def process_trajectories(self, trajectory):
boolean_series = self.data.traj.isin(trajectory)
sel_ind = boolean_series.values.nonzero()[0]
filtered_data = self.data.iloc[sel_ind]
state_features = filtered_data.drop(['traj', 'step', 'a:action', 'r:reward'], axis=1)
action = filtered_data['a:action']
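        # the reward is non-zero only at the terminal step, so this picks out the trajectory outcome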
outcome = filtered_data['r:reward'][filtered_data['r:reward']!=0]
next_state_features = filtered_data.copy()
next_state_features = next_state_features[next_state_features['step']>0]
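        # repeat the final row so the next-state frame stays the same length as the state frame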
next_state_features = next_state_features.append(filtered_data.tail(1))
next_state_features = next_state_features.drop(['traj', 'step', 'a:action', 'r:reward'], axis=1)
assert len(state_features) == len(next_state_features), "state feature counts do not match."
if self.sens_attr == 'gender':
sens_attr = filtered_data['o:gender'][filtered_data['r:reward']!=0]
elif self.sens_attr == 'age':
sens_attr = filtered_data['o:age'][filtered_data['r:reward']!=0]
        else:
            raise ValueError('Unsupported sensitive attribute: {}'.format(self.sens_attr))
seq_info = filtered_data[['traj', 'step']]
if self.return_next_state:
return state_features, next_state_features, action, seq_info, outcome, sens_attr
else:
return state_features, action, seq_info, outcome, sens_attr |
the-stack_106_25816 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Tcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import TcoinTestFramework
from test_framework.util import *
class TxnMallTest(TcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 TCN:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1240 TCN to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = 1240
outputs[change_address] = 1248 - 1240 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two spends, each using one 50 TCN coin
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50TCN for another
# matured block, minus 40, minus 20, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100TCN for
# two more matured blocks, minus 1240 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance("foo"), 1219)
assert_equal(self.nodes[0].getbalance("bar"), 29)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-1219
- 29
-1240
+ 100
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
# Node1's "from0" account balance should be just the doublespend:
assert_equal(self.nodes[1].getbalance("from0"), 1240)
if __name__ == '__main__':
TxnMallTest().main()
|
the-stack_106_25820 | import os
import subprocess
currentdir = os.path.dirname(__file__)
examplesdir = os.path.join(
currentdir, os.path.join(os.pardir, os.pardir), 'examples'
)
example_files = []
for root, dirs, files in os.walk(examplesdir):
    for basename in files:
        if basename.endswith('.py'):
            example_files.append(os.path.abspath(
                os.path.join(root, basename)
            ))
print('"{0}" examples found!'.format(len(example_files)))
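# Run every example sequentially; subprocess.check_call raises CalledProcessError
# and aborts the whole run as soon as one example exits with a non-zero status.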
for path in example_files:
print('-- ', path)
cmd = ['python3', path]
subprocess.check_call(cmd, env=os.environ)
|
the-stack_106_25822 | from django import template
register = template.Library()
@register.filter
def flow_color(value):
if value == 0.0:
return "rgb(255, 255, 255)"
elif value > 0.0:
max_light = 196
light = int(max_light - min(max_light, value / 30.0 * max_light))
return "rgb({}, {}, 255)".format(light, light)
else:
return "rgb(255, 127, 127)"
|
the-stack_106_25823 | # -*- coding: utf-8 -*-
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the auto_forensicate script."""
from __future__ import unicode_literals
import argparse
import logging
import os
import StringIO
import sys
import tempfile
import unittest
import auto_acquire
from auto_forensicate import errors
from auto_forensicate import uploader
from auto_forensicate.recipes import base
import mock
DEFAULT_ARTIFACT_CONTENT = os.urandom(1000)
class StringIORecipe(base.BaseRecipe):
"""A Recipe returning 1 artifact with a StringIO."""
def __init__(self, name, options=None):
super(StringIORecipe, self).__init__(name, options=options)
self.ran_collection = False
def GetArtifacts(self):
return [base.StringArtifact('fake/path', DEFAULT_ARTIFACT_CONTENT)]
class FailingRecipe(base.BaseRecipe):
"""A Recipe raising an IOError when running GetArtifact."""
def GetArtifacts(self):
raise errors.RecipeException('Everything is terrible.')
class FileCopyUploader(object):
"""Test implementation of an Uploader object that copies content to a file."""
def __init__(self, destination_file):
self._origin_dir = os.getcwd()
self.destination_file = destination_file
def UploadArtifact(self, artifact, update_callback=None):
data = artifact._GetStream().read()
self.destination_file.write(data)
if update_callback:
update_callback(len(data), len(data))
class AutoForensicateTest(unittest.TestCase):
"""Tests for the AutoForensicate class.
  TODO(romaing): Add tests for Main(), by setting sys.argv and testing
  that the proper recipes ran.
"""
def FakeBadParseGCSJSON(self, _):
return None
def FakeParseGCSJSON(self, _):
return {'client_id': 'fake_client_id'}
def FakeMakeProgressBar(self, max_size, name, message=None): # pylint: disable=unused-argument
return mock.create_autospec(auto_acquire.BaBar, spec_set=True)
def testParseArgsHelp(self):
recipes = {
'test1': None,
'test2': None
}
self.maxDiff = None
af = auto_acquire.AutoForensicate(recipes=recipes)
parser = af._CreateParser()
expected_help = (
'usage: run_tests.py [-h] --acquire {all,test1,test2} [--gs_keyfile '
'GS_KEYFILE]\n'
' [--logging {stackdriver,stdout}] [--select_disks]'
'\n'
' destination\n\n'
'Autopush forensics evidence to Cloud Storage\n\n'
'positional arguments:\n'
' destination Sets the destination for uploads. For example'
'\n gs://bucket_name/path will upload to GCS in'
' bucket\n <bucket_name> in the folder </path/>'
'\n\n'
'optional arguments:\n'
' -h, --help show this help message and exit\n'
' --acquire {all,test1,test2}\n'
' Evidence to acquire\n'
' --gs_keyfile GS_KEYFILE\n'
' Path to the service account private key JSON '
'file for\n Google Cloud\n'
' --logging {stackdriver,stdout}\n'
' Selects logging methods.\n'
' --select_disks Asks the user to select which disk to acquire'
'\n'
)
self.assertEqual(parser.format_help(), expected_help)
def testParseDestination(self):
recipes = {
'test1': None,
'test2': None
}
af = auto_acquire.AutoForensicate(recipes=recipes)
test_args = ['--acquire', 'all', 'destination_url']
options = af.ParseArguments(test_args)
self.assertEqual(options.destination, 'destination_url')
def testParseArgsRequiredJson(self):
recipes = {
'test1': None,
'test2': None
}
af = auto_acquire.AutoForensicate(recipes=recipes)
test_args = ['--acquire', 'test1', '--logging', 'stackdriver']
with self.assertRaises(SystemExit):
prev_stderr = sys.stderr
sys.stderr = StringIO.StringIO()
af.ParseArguments(test_args)
sys.stderr = prev_stderr
def testParseArgsRequiredURL(self):
recipes = {
'test1': None,
'test2': None
}
af = auto_acquire.AutoForensicate(recipes=recipes)
test_args = ['--acquire', 'test1', '--gs_keyfile=null']
prev_stderr = sys.stderr
sys.stderr = StringIO.StringIO()
with self.assertRaises(SystemExit):
af.ParseArguments(test_args)
sys.stderr = prev_stderr
def testParseAcquireOneRecipe(self):
recipes = {
'test1': None,
'test2': None
}
test_args = ['--acquire', 'test1', 'nfs://destination']
af = auto_acquire.AutoForensicate(recipes=recipes)
parser = af._CreateParser()
options = parser.parse_args(test_args)
expected_recipes = ['test1']
self.assertEqual(options.acquire, expected_recipes)
def testParseAcquireBad(self):
recipes = {
'test1': None,
'test2': None
}
af = auto_acquire.AutoForensicate(recipes=recipes)
test_args = [
'--acquire', 'test4', '--acquire', 'all',
'--gs_keyfile=file', 'gs://bucket']
prev_stderr = sys.stderr
sys.stderr = StringIO.StringIO()
with self.assertRaises(SystemExit):
af.ParseArguments(test_args)
sys.stderr = prev_stderr
def testParseAcquireAll(self):
recipes = {
'test1': None,
'test2': None
}
af = auto_acquire.AutoForensicate(recipes=recipes)
test_args = ['--acquire', 'test1', '--acquire', 'all', 'gs://bucket']
options = af.ParseArguments(test_args)
expected_recipes = ['test1', 'test2']
self.assertEqual(options.acquire, expected_recipes)
def testMakeUploader(self):
af = auto_acquire.AutoForensicate(recipes={'test': None})
options = af.ParseArguments(['--acquire', 'all', 'destination'])
uploader_object = af._MakeUploader(options)
self.assertEqual(uploader_object, None)
options = af.ParseArguments(['--acquire', 'all', 'gs://destination'])
with self.assertRaises(errors.BadConfigOption):
# We need a --gs_keyfile option for gs:// URLs
uploader_object = af._MakeUploader(options)
af._ParseGCSJSON = self.FakeBadParseGCSJSON
options = af.ParseArguments(
['--acquire', 'all', '--gs_keyfile', 'keyfile', 'gs://destination'])
with self.assertRaises(errors.BadConfigOption):
# Invalid gs_keyfile
uploader_object = af._MakeUploader(options)
af._ParseGCSJSON = self.FakeParseGCSJSON
options = af.ParseArguments(
['--acquire', 'all', '--gs_keyfile', 'keyfile', 'gs://destination'])
uploader_object = af._MakeUploader(options)
self.assertIsInstance(uploader_object, uploader.GCSUploader)
def testFailDo(self):
af = auto_acquire.AutoForensicate(recipes={})
recipe = FailingRecipe('fail')
with tempfile.TemporaryFile() as destination:
uploader_object = FileCopyUploader(destination)
af._uploader = uploader_object
with self.assertRaises(errors.RecipeException):
af.Do(recipe)
def testDo(self):
af = auto_acquire.AutoForensicate(recipes={})
parser = argparse.ArgumentParser()
parser.add_argument('--fake', action='store_true')
options = parser.parse_args(['--fake'])
af._logger = logging.getLogger(self.__class__.__name__)
af._MakeProgressBar = self.FakeMakeProgressBar
recipe = StringIORecipe('stringio', options=options)
self.assertTrue(recipe._options.fake)
with tempfile.TemporaryFile() as destination:
uploader_object = FileCopyUploader(destination)
af._uploader = uploader_object
af.Do(recipe)
destination.seek(0)
copied_data = destination.read()
self.assertEqual(copied_data, DEFAULT_ARTIFACT_CONTENT)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_25824 | from network import WLAN
wlan = WLAN(mode=WLAN.STA)
import pycom
import time
# initialisation code
pycom.heartbeat(False)
pycom.rgbled(0x008080) # Cyan
# Connect to Wifi
nets = wlan.scan()
for net in nets:
if net.ssid == 'HDTL-a':
print('Network found!')
wlan.connect(net.ssid, auth=(net.sec, 'FEEDDA1961'), timeout=5000)
while not wlan.isconnected():
machine.idle() # save power while waiting
print('WLAN connection succeeded!')
break
while True:
pycom.rgbled(0x800000) # Red
time.sleep(1)
pycom.rgbled(0x008000) # Green
time.sleep(1)
pycom.rgbled(0x000080) # Blue
time.sleep(1)
|
the-stack_106_25827 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-statements,no-name-in-module,import-error
import os.path
import platform
from argcomplete.completers import FilesCompleter
from azure.cli.core.commands.parameters import (
file_type, get_enum_type, get_resource_name_completion_list, get_three_state_flag, name_type, tags_type, zones_type, edge_zone_type)
from azure.cli.core.commands.validators import validate_file_or_dict
from azure.cli.core.profiles import ResourceType
from knack.arguments import CLIArgumentType
from ._completers import (
get_vm_size_completion_list, get_k8s_versions_completion_list, get_k8s_upgrades_completion_list, get_ossku_completion_list)
from ._validators import (
validate_create_parameters, validate_kubectl_version, validate_kubelogin_version, validate_k8s_version, validate_linux_host_name,
validate_list_of_integers, validate_ssh_key, validate_nodes_count,
validate_nodepool_name, validate_vm_set_type, validate_load_balancer_sku, validate_nodepool_id, validate_snapshot_id,
validate_load_balancer_outbound_ips, validate_priority, validate_eviction_policy, validate_spot_max_price,
validate_load_balancer_outbound_ip_prefixes, validate_taints, validate_ip_ranges, validate_acr, validate_nodepool_tags,
validate_load_balancer_outbound_ports, validate_load_balancer_idle_timeout, validate_vnet_subnet_id, validate_nodepool_labels,
validate_ppg, validate_assign_identity, validate_max_surge, validate_assign_kubelet_identity, validate_credential_format)
from ._consts import (
CONST_OUTBOUND_TYPE_LOAD_BALANCER,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_SPOT_EVICTION_POLICY_DEALLOCATE,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_DOWN_MODE_DEALLOCATE,
CONST_OS_DISK_TYPE_MANAGED,
CONST_OS_DISK_TYPE_EPHEMERAL,
CONST_RAPID_UPGRADE_CHANNEL,
CONST_STABLE_UPGRADE_CHANNEL,
CONST_PATCH_UPGRADE_CHANNEL,
CONST_NODE_IMAGE_UPGRADE_CHANNEL,
CONST_NONE_UPGRADE_CHANNEL,
CONST_NODEPOOL_MODE_SYSTEM,
CONST_NODEPOOL_MODE_USER,
)
# candidates for enumeration, no longer maintained
orchestrator_types = ["Custom", "DCOS", "Kubernetes", "Swarm", "DockerCE"]
regions_in_preview = [
"canadacentral",
"canadaeast",
"centralindia",
"koreasouth",
"koreacentral",
"southindia",
"uksouth",
"ukwest",
"westcentralus",
"westindia",
"westus2",
]
regions_in_prod = [
"australiaeast",
"australiasoutheast",
"brazilsouth",
"centralus",
"eastasia",
"eastus",
"eastus2",
"japaneast",
"japanwest",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"westeurope",
"westus",
]
storage_profile_types = ["StorageAccount", "ManagedDisks"]
# candidates for enumeration, under support
node_mode_types = [CONST_NODEPOOL_MODE_SYSTEM, CONST_NODEPOOL_MODE_USER]
node_priorities = [CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT]
node_eviction_policies = [CONST_SPOT_EVICTION_POLICY_DELETE, CONST_SPOT_EVICTION_POLICY_DEALLOCATE]
node_os_disk_types = [CONST_OS_DISK_TYPE_MANAGED, CONST_OS_DISK_TYPE_EPHEMERAL]
network_plugins = ['azure', 'kubenet']
outbound_types = [CONST_OUTBOUND_TYPE_LOAD_BALANCER, CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING]
auto_upgrade_channels = [
CONST_RAPID_UPGRADE_CHANNEL,
CONST_STABLE_UPGRADE_CHANNEL,
CONST_PATCH_UPGRADE_CHANNEL,
CONST_NODE_IMAGE_UPGRADE_CHANNEL,
CONST_NONE_UPGRADE_CHANNEL,
]
dev_space_endpoint_types = ['Public', 'Private', 'None']
def load_arguments(self, _):
acr_arg_type = CLIArgumentType(metavar='ACR_NAME_OR_RESOURCE_ID')
# ACS command argument configuration
with self.argument_context('acs') as c:
c.argument('resource_name', name_type,
completer=get_resource_name_completion_list(
'Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('name', name_type,
completer=get_resource_name_completion_list(
'Microsoft.ContainerService/ContainerServices'),
help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`')
c.argument('container_service_name', name_type, help='Name of the container service. You can configure the default using `az configure --defaults acs=<name>`',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
c.argument('admin_username', options_list=[
'--admin-username', '-u'], default='azureuser')
c.argument('api_version',
help=_get_feature_in_preview_message() + 'Use API version of ACS to perform az acs operations. Available options: 2017-01-31, 2017-07-01. Default: the latest version for the location')
c.argument('dns_name_prefix', options_list=['--dns-prefix', '-d'])
c.argument('orchestrator_type', get_enum_type(
orchestrator_types), options_list=['--orchestrator-type', '-t'])
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('tags', tags_type)
c.argument('disable_browser',
help='Do not open browser after opening a proxy to the cluster web user interface')
with self.argument_context('acs create') as c:
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('master_profile', options_list=['--master-profile', '-m'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the master profile. Note it will override any master settings once set')
c.argument('master_vm_size', completer=get_vm_size_completion_list,
help=_get_feature_in_preview_message())
c.argument('agent_count', type=int)
c.argument('generate_ssh_keys', action='store_true', validator=validate_create_parameters,
help='Generate SSH public and private key files if missing')
c.argument('master_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'The disk size for master pool vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('master_vnet_subnet_id', type=str,
help=_get_feature_in_preview_message() + 'The custom vnet subnet id. Note agent need to used the same vnet if master set. Default: ""')
c.argument('master_first_consecutive_static_ip', type=str,
help=_get_feature_in_preview_message() + 'The first consecutive ip used to specify static ip block.')
c.argument('master_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Default: varies based on Orchestrator')
c.argument('agent_profiles', options_list=['--agent-profiles', '-a'], type=validate_file_or_dict,
help=_get_feature_in_preview_message() + 'The file or dictionary representation of the agent profiles. Note it will override any agent settings once set')
c.argument('agent_vm_size', completer=get_vm_size_completion_list,
help='Set the default size for agent pools vms.')
c.argument('agent_osdisk_size', type=int,
help=_get_feature_in_preview_message() + 'Set the default disk size for agent pools vms. Unit in GB. Default: corresponding vmsize disk size')
c.argument('agent_vnet_subnet_id', type=str,
help=_get_feature_in_preview_message() + 'Set the default custom vnet subnet id for agent pools. Note agent need to used the same vnet if master set. Default: ""')
c.argument('agent_ports', type=validate_list_of_integers,
help=_get_feature_in_preview_message() + 'Set the default ports exposed on the agent pools. Only usable for non-Kubernetes. Default: 8080,4000,80')
c.argument('agent_storage_profile', get_enum_type(storage_profile_types),
help=_get_feature_in_preview_message() + 'Set default storage profile for agent pools. Default: varies based on Orchestrator')
c.argument('windows', action='store_true',
help='If true, set the default osType of agent pools to be Windows.')
c.argument('validate', action='store_true',
help='Generate and validate the ARM template without creating any resources')
c.argument('orchestrator_version', help=_get_feature_in_preview_message(
) + 'Use Orchestrator Version to specify the semantic version for your choice of orchestrator.')
with self.argument_context('acs scale') as c:
c.argument('new_agent_count', type=int)
for scope in ['dcos', 'kubernetes']:
with self.argument_context('acs {} browse'.format(scope)) as c:
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
with self.argument_context('acs dcos install-cli') as c:
c.argument('install_location',
default=_get_default_install_location('dcos'))
with self.argument_context('acs kubernetes get-credentials') as c:
c.argument('path', options_list=['--file', '-f'])
c.argument('overwrite_existing', action='store_true',
help='If specified, overwrite any existing credentials.')
with self.argument_context('acs kubernetes install-cli') as c:
c.argument('install_location', type=file_type, completer=FilesCompleter(),
default=_get_default_install_location('kubectl'))
c.argument('ssh_key_file', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter(), help='Path to an SSH key file to use.')
# AKS command argument configuration
with self.argument_context('aks', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('resource_name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('name', name_type, help='Name of the managed cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/ManagedClusters'))
c.argument('kubernetes_version', options_list=[
'--kubernetes-version', '-k'], validator=validate_k8s_version)
c.argument('node_count', options_list=['--node-count', '-c'], type=int)
c.argument('tags', tags_type)
with self.argument_context('aks create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('kubernetes_version',
completer=get_k8s_versions_completion_list)
c.argument('admin_username', options_list=[
'--admin-username', '-u'], default='azureuser')
c.argument('dns_name_prefix', options_list=['--dns-name-prefix', '-p'])
c.argument('generate_ssh_keys', action='store_true',
validator=validate_create_parameters)
c.argument('node_vm_size', options_list=[
'--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('nodepool_name', type=str, default='nodepool1',
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
c.argument('os_sku', completer=get_ossku_completion_list)
c.argument('ssh_key_value', required=False, type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'),
completer=FilesCompleter(), validator=validate_ssh_key)
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
c.argument('dns_service_ip')
c.argument('docker_bridge_address')
c.argument('edge_zone', edge_zone_type)
c.argument('load_balancer_sku', type=str,
validator=validate_load_balancer_sku)
c.argument('load_balancer_managed_outbound_ip_count', type=int)
c.argument('load_balancer_outbound_ips', type=str,
validator=validate_load_balancer_outbound_ips)
c.argument('load_balancer_outbound_ip_prefixes', type=str,
validator=validate_load_balancer_outbound_ip_prefixes)
c.argument('load_balancer_outbound_ports', type=int,
validator=validate_load_balancer_outbound_ports)
c.argument('load_balancer_idle_timeout', type=int,
validator=validate_load_balancer_idle_timeout)
c.argument('outbound_type', arg_type=get_enum_type(outbound_types))
c.argument('auto_upgrade_channel', arg_type=get_enum_type(auto_upgrade_channels))
c.argument('enable_cluster_autoscaler', action='store_true')
c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"],
help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.")
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('vm_set_type', type=str, validator=validate_vm_set_type)
c.argument('zones', zones_type, options_list=[
'--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.')
c.argument('uptime_sla', action='store_true')
c.argument('enable_addons', options_list=['--enable-addons', '-a'])
c.argument('disable_rbac', action='store_true')
c.argument('enable_rbac', action='store_true', options_list=['--enable-rbac', '-r'],
deprecate_info=c.deprecate(redirect="--disable-rbac", hide="2.0.45"))
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'])
c.argument('network_plugin', arg_type=get_enum_type(network_plugins))
c.argument('network_policy')
c.argument('no_ssh_key', options_list=['--no-ssh-key', '-x'])
c.argument('pod_cidr')
c.argument('service_cidr')
c.argument('ppg', type=str, validator=validate_ppg)
c.argument('node_osdisk_type', arg_type=get_enum_type(node_os_disk_types))
c.argument('node_osdisk_size', type=int)
c.argument('vnet_subnet_id', type=str,
validator=validate_vnet_subnet_id)
c.argument('workspace_resource_id')
c.argument('enable_msi_auth_for_monitoring', arg_type=get_three_state_flag(), is_preview=True)
c.argument('skip_subnet_role_assignment', action='store_true')
c.argument('api_server_authorized_ip_ranges',
type=str, validator=validate_ip_ranges)
c.argument('attach_acr', acr_arg_type)
c.argument('enable_private_cluster', action='store_true')
c.argument('private_dns_zone')
c.argument('fqdn_subdomain')
c.argument('disable_public_fqdn', action='store_true')
c.argument('nodepool_tags', nargs='*', validator=validate_nodepool_tags,
help='space-separated tags: key[=value] [key[=value] ...]. Use "" to clear existing tags.')
c.argument('enable_managed_identity', action='store_true')
c.argument('assign_identity', type=str,
validator=validate_assign_identity)
c.argument('nodepool_labels', nargs='*', validator=validate_nodepool_labels,
help='space-separated labels: key[=value] [key[=value] ...]. See https://aka.ms/node-labels for syntax of labels.')
c.argument('enable_node_public_ip', action='store_true')
c.argument('node_public_ip_prefix_id', type=str)
c.argument('windows_admin_username', options_list=[
'--windows-admin-username'])
c.argument('windows_admin_password', options_list=[
'--windows-admin-password'])
c.argument('enable_ahub', options_list=['--enable-ahub'], action='store_true')
c.argument('node_osdisk_diskencryptionset_id', type=str,
options_list=['--node-osdisk-diskencryptionset-id', '-d'])
c.argument('aci_subnet_name')
c.argument('enable_encryption_at_host', options_list=[
'--enable-encryption-at-host'], action='store_true')
c.argument('enable_ultra_ssd', options_list=[
'--enable-ultra-ssd'], action='store_true')
c.argument('appgw_name', options_list=[
'--appgw-name'], arg_group='Application Gateway')
c.argument('appgw_subnet_cidr', options_list=[
'--appgw-subnet-cidr'], arg_group='Application Gateway')
c.argument('appgw_id', options_list=[
'--appgw-id'], arg_group='Application Gateway')
c.argument('appgw_subnet_id', options_list=[
'--appgw-subnet-id'], arg_group='Application Gateway')
c.argument('appgw_watch_namespace', options_list=[
'--appgw-watch-namespace'], arg_group='Application Gateway')
c.argument('assign_kubelet_identity', validator=validate_assign_kubelet_identity)
c.argument('disable_local_accounts', action='store_true')
c.argument('enable_secret_rotation', action='store_true')
c.argument('rotation_poll_interval', type=str)
c.argument('enable_windows_gmsa', action='store_true',
options_list=['--enable-windows-gmsa'])
c.argument('gmsa_dns_server', options_list=['--gmsa-dns-server'])
c.argument('gmsa_root_domain_name', options_list=[
'--gmsa-root-domain-name'])
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('enable_sgxquotehelper', action='store_true')
c.argument('enable_fips_image', action='store_true')
c.argument('snapshot_id', validator=validate_snapshot_id)
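    # Illustrative invocation (not part of the original module) showing how a few of
    # the `aks create` arguments registered above surface on the command line; the
    # resource group, cluster name and SSH key path are placeholders:
    #   az aks create -g MyResourceGroup -n MyManagedCluster \
    #       --node-count 3 --node-vm-size Standard_DS2_v2 \
    #       --enable-cluster-autoscaler --min-count 1 --max-count 5 \
    #       --ssh-key-value ~/.ssh/id_rsa.pub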
with self.argument_context('aks update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('attach_acr', acr_arg_type, validator=validate_acr)
c.argument('detach_acr', acr_arg_type, validator=validate_acr)
with self.argument_context('aks update') as c:
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=[
"--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=[
"--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('cluster_autoscaler_profile', nargs='+', options_list=["--cluster-autoscaler-profile", "--ca-profile"],
help="Space-separated list of key=value pairs for configuring cluster autoscaler. Pass an empty string to clear the profile.")
c.argument('min_count', type=int, validator=validate_nodes_count)
c.argument('max_count', type=int, validator=validate_nodes_count)
c.argument('uptime_sla', action='store_true')
c.argument('no_uptime_sla', action='store_true')
c.argument('load_balancer_managed_outbound_ip_count', type=int)
c.argument('load_balancer_outbound_ips', type=str,
validator=validate_load_balancer_outbound_ips)
c.argument('load_balancer_outbound_ip_prefixes', type=str,
validator=validate_load_balancer_outbound_ip_prefixes)
c.argument('load_balancer_outbound_ports', type=int,
validator=validate_load_balancer_outbound_ports)
c.argument('load_balancer_idle_timeout', type=int,
validator=validate_load_balancer_idle_timeout)
c.argument('auto_upgrade_channel', arg_type=get_enum_type(auto_upgrade_channels))
c.argument('api_server_authorized_ip_ranges',
type=str, validator=validate_ip_ranges)
c.argument('enable_ahub', options_list=['--enable-ahub'], action='store_true')
c.argument('disable_ahub', options_list=['--disable-ahub'], action='store_true')
c.argument('enable_public_fqdn', action='store_true')
c.argument('disable_public_fqdn', action='store_true')
c.argument('windows_admin_password', options_list=[
'--windows-admin-password'])
c.argument('enable_managed_identity', action='store_true')
c.argument('assign_identity', type=str,
validator=validate_assign_identity)
c.argument('disable_local_accounts', action='store_true')
c.argument('enable_local_accounts', action='store_true')
c.argument('enable_secret_rotation', action='store_true')
c.argument('disable_secret_rotation', action='store_true')
c.argument('rotation_poll_interval', type=str)
c.argument('enable_windows_gmsa', action='store_true',
options_list=['--enable-windows-gmsa'])
c.argument('gmsa_dns_server', options_list=['--gmsa-dns-server'])
c.argument('gmsa_root_domain_name', options_list=[
'--gmsa-root-domain-name'])
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('nodepool_labels', nargs='*', validator=validate_nodepool_labels,
help='space-separated labels: key[=value] [key[=value] ...]. See https://aka.ms/node-labels for syntax of labels.')
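    # Illustrative invocation (not part of the original module): enabling the cluster
    # autoscaler on an existing cluster with the `aks update` arguments registered
    # above; resource names are placeholders:
    #   az aks update -g MyResourceGroup -n MyManagedCluster \
    #       --enable-cluster-autoscaler --min-count 1 --max-count 5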
with self.argument_context('aks disable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('addons', options_list=['--addons', '-a'])
with self.argument_context('aks enable-addons', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('addons', options_list=['--addons', '-a'])
c.argument('subnet_name', options_list=[
'--subnet-name', '-s'], help='Name of an existing subnet to use with the virtual-node add-on.')
c.argument('appgw_name', options_list=[
'--appgw-name'], arg_group='Application Gateway')
c.argument('appgw_subnet_cidr', options_list=[
'--appgw-subnet-cidr'], arg_group='Application Gateway')
c.argument('appgw_id', options_list=[
'--appgw-id'], arg_group='Application Gateway')
c.argument('appgw_subnet_id', options_list=[
'--appgw-subnet-id'], arg_group='Application Gateway')
c.argument('appgw_watch_namespace', options_list=[
'--appgw-watch-namespace'], arg_group='Application Gateway')
c.argument('enable_sgxquotehelper', action='store_true')
c.argument('enable_secret_rotation', action='store_true')
c.argument('rotation_poll_interval', type=str)
with self.argument_context('aks get-credentials', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('admin', options_list=['--admin', '-a'], default=False)
c.argument('context_name', options_list=['--context'],
help='If specified, overwrite the default context name. The `--admin` parameter takes precedence over `--context`')
c.argument('path', options_list=['--file', '-f'], type=file_type, completer=FilesCompleter(),
default=os.path.join(os.path.expanduser('~'), '.kube', 'config'))
c.argument('public_fqdn', default=False, action='store_true')
c.argument('credential_format', type=str, options_list=['--format'], validator=validate_credential_format)
for scope in ['aks', 'acs kubernetes', 'acs dcos']:
with self.argument_context('{} install-cli'.format(scope)) as c:
c.argument('client_version', validator=validate_kubectl_version,
help='Version of kubectl to install.')
c.argument('install_location', default=_get_default_install_location(
'kubectl'), help='Path at which to install kubectl.')
c.argument('base_src_url',
help='Base download source URL for kubectl releases.')
c.argument('kubelogin_version', validator=validate_kubelogin_version,
help='Version of kubelogin to install.')
c.argument('kubelogin_install_location', default=_get_default_install_location(
'kubelogin'), help='Path at which to install kubelogin.')
c.argument('kubelogin_base_src_url', options_list=[
'--kubelogin-base-src-url', '-l'], help='Base download source URL for kubelogin releases.')
with self.argument_context('aks update-credentials', arg_group='Service Principal') as c:
c.argument('reset_service_principal', action='store_true')
c.argument('service_principal')
c.argument('client_secret')
with self.argument_context('aks update-credentials', arg_group='AAD') as c:
c.argument('reset_aad', action='store_true')
c.argument('aad_client_app_id')
c.argument('aad_server_app_id')
c.argument('aad_server_app_secret')
c.argument('aad_tenant_id')
with self.argument_context('aks upgrade', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('kubernetes_version',
completer=get_k8s_upgrades_completion_list)
c.argument('yes', options_list=[
'--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
with self.argument_context('aks scale', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('nodepool_name', type=str,
help='Node pool name, up to 12 alphanumeric characters', validator=validate_nodepool_name)
with self.argument_context('aks nodepool', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='managed_clusters') as c:
c.argument('cluster_name', type=str, help='The cluster name.')
for scope in ['aks nodepool add']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=[
'--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
c.argument('zones', zones_type, options_list=[
'--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.')
c.argument('node_vm_size', options_list=[
'--node-vm-size', '-s'], completer=get_vm_size_completion_list)
c.argument('max_pods', type=int, options_list=['--max-pods', '-m'])
c.argument('os_type', type=str)
c.argument('os_sku', completer=get_ossku_completion_list)
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('scale_down_mode', arg_type=get_enum_type([CONST_SCALE_DOWN_MODE_DELETE, CONST_SCALE_DOWN_MODE_DEALLOCATE]))
c.argument('node_taints', validator=validate_taints)
c.argument('priority', arg_type=get_enum_type(node_priorities), validator=validate_priority)
c.argument('eviction_policy', arg_type=get_enum_type(node_eviction_policies), validator=validate_eviction_policy)
c.argument('spot_max_price', type=float,
validator=validate_spot_max_price)
c.argument('tags', tags_type)
c.argument('labels', nargs='*', validator=validate_nodepool_labels)
c.argument('mode', get_enum_type(node_mode_types))
c.argument('enable_node_public_ip', action='store_true')
c.argument('node_public_ip_prefix_id', type=str)
c.argument('ppg', type=str, validator=validate_ppg)
c.argument('max_surge', type=str, validator=validate_max_surge)
c.argument('node_osdisk_type', arg_type=get_enum_type(node_os_disk_types))
c.argument('node_osdisk_size', type=int)
c.argument('enable_encryption_at_host', options_list=[
'--enable-encryption-at-host'], action='store_true')
c.argument('enable_ultra_ssd', options_list=[
'--enable-ultra-ssd'], action='store_true')
c.argument('enable_fips_image', action='store_true')
c.argument('snapshot_id', validator=validate_snapshot_id)
for scope in ['aks nodepool show', 'aks nodepool delete', 'aks nodepool scale', 'aks nodepool upgrade', 'aks nodepool update']:
with self.argument_context(scope) as c:
c.argument('nodepool_name', type=str, options_list=[
'--name', '-n'], validator=validate_nodepool_name, help='The node pool name.')
with self.argument_context('aks nodepool upgrade') as c:
c.argument('snapshot_id', validator=validate_snapshot_id)
with self.argument_context('aks nodepool update', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='agent_pools') as c:
c.argument('enable_cluster_autoscaler', options_list=[
"--enable-cluster-autoscaler", "-e"], action='store_true')
c.argument('disable_cluster_autoscaler', options_list=[
"--disable-cluster-autoscaler", "-d"], action='store_true')
c.argument('update_cluster_autoscaler', options_list=[
"--update-cluster-autoscaler", "-u"], action='store_true')
c.argument('scale_down_mode', arg_type=get_enum_type([CONST_SCALE_DOWN_MODE_DELETE, CONST_SCALE_DOWN_MODE_DEALLOCATE]))
c.argument('tags', tags_type)
c.argument('mode', get_enum_type(node_mode_types))
c.argument('max_surge', type=str, validator=validate_max_surge)
c.argument('labels', nargs='*', validator=validate_nodepool_labels)
c.argument('node_taints', validator=validate_taints)
with self.argument_context('aks command invoke') as c:
c.argument('command_string', type=str, options_list=[
"--command", "-c"], help='the command to run')
c.argument('command_files', options_list=["--file", "-f"], required=False, action="append",
help='attach any files the command may use, or use \'.\' to upload the current folder.')
with self.argument_context('aks command result') as c:
c.argument('command_id', type=str, options_list=[
"--command-id", "-i"], help='the command ID from "aks command invoke"')
with self.argument_context('aks use-dev-spaces') as c:
c.argument('update', options_list=['--update'], action='store_true')
c.argument('space_name', options_list=['--space', '-s'])
c.argument('endpoint_type', get_enum_type(dev_space_endpoint_types, default='Public'), options_list=['--endpoint', '-e'])
c.argument('prompt', options_list=[
'--yes', '-y'], action='store_true', help='Do not prompt for confirmation. Requires --space.')
with self.argument_context('aks remove-dev-spaces') as c:
c.argument('prompt', options_list=[
'--yes', '-y'], action='store_true', help='Do not prompt for confirmation')
# OpenShift command argument configuration
with self.argument_context('openshift', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument('resource_name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('name', name_type, help='Name of the managed OpenShift cluster.',
completer=get_resource_name_completion_list('Microsoft.ContainerService/OpenShiftManagedClusters'))
c.argument('compute_count', options_list=[
'--compute-count', '-c'], type=int, default=4)
c.argument('tags', tags_type)
with self.argument_context('openshift create', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument('name', validator=validate_linux_host_name)
c.argument('compute_vm_size', options_list=['--compute-vm-size', '-s'])
c.argument('customer_admin_group_id', options_list=[
'--customer-admin-group-id'])
c.argument('workspace_id')
with self.argument_context('openshift monitor enable', resource_type=ResourceType.MGMT_CONTAINERSERVICE, operation_group='open_shift_managed_clusters') as c:
c.argument(
'workspace_id', help='The resource ID of an existing Log Analytics Workspace to use for storing monitoring data.')
for scope in ['aks snapshot create']:
with self.argument_context(scope) as c:
c.argument('snapshot_name', options_list=['--name', '-n'], required=True, validator=validate_linux_host_name, help='The snapshot name.')
c.argument('tags', tags_type)
c.argument('nodepool_id', required=True, validator=validate_nodepool_id, help='The nodepool id.')
c.argument('aks_custom_headers')
for scope in ['aks snapshot show', 'aks snapshot delete']:
with self.argument_context(scope) as c:
c.argument('snapshot_name', options_list=['--name', '-n'], required=True, validator=validate_linux_host_name, help='The snapshot name.')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
def _get_default_install_location(exe_name):
system = platform.system()
if system == 'Windows':
home_dir = os.environ.get('USERPROFILE')
if not home_dir:
return None
install_location = os.path.join(
home_dir, r'.azure-{0}\{0}.exe'.format(exe_name))
elif system in ('Linux', 'Darwin'):
install_location = '/usr/local/bin/{}'.format(exe_name)
else:
install_location = None
return install_location
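# Illustrative note (not from the original source): _get_default_install_location('kubectl')
# resolves to something like r'C:\Users\<user>\.azure-kubectl\kubectl.exe' on Windows
# (derived from USERPROFILE) and to '/usr/local/bin/kubectl' on Linux/macOS, returning
# None when the platform is unrecognised or USERPROFILE is unset.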
def _get_feature_in_preview_message():
return "Feature in preview, only in " + ", ".join(regions_in_preview) + ". "
|
the-stack_106_25828 | # (c) 2012, Daniel Hokka Zakrisson <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: file
author: Daniel Hokka Zakrisson (!UNKNOWN) <[email protected]>
version_added: "0.9"
short_description: read file contents
description:
- This lookup returns the contents from a file on the Ansible controller's file system.
options:
_terms:
description: path(s) of files to read
required: True
rstrip:
description: whether or not to remove whitespace from the ending of the looked-up file
type: bool
required: False
default: True
lstrip:
description: whether or not to remove whitespace from the beginning of the looked-up file
type: bool
required: False
default: False
notes:
- if read in variable context, the file can be interpreted as YAML if the content is valid to the parser.
    - this lookup does not understand 'globbing', use the fileglob lookup instead.
"""
EXAMPLES = """
- debug: msg="the value of foo.txt is {{lookup('file', '/etc/foo.txt') }}"
- name: display multiple file contents
debug: var=item
with_file:
- "/path/to/foo.txt"
- "bar.txt" # will be looked in files/ dir relative to play or in role
- "/path/to/biz.txt"
"""
RETURN = """
_raw:
description:
- content of file(s)
type: list
elements: str
"""
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_text
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
display.debug("File lookup term: %s" % term)
# Find the file in the expected search path
lookupfile = self.find_file_in_search_path(variables, 'files', term)
display.vvvv(u"File lookup using %s as file" % lookupfile)
try:
if lookupfile:
b_contents, show_data = self._loader._get_file_contents(lookupfile)
contents = to_text(b_contents, errors='surrogate_or_strict')
if kwargs.get('lstrip', False):
contents = contents.lstrip()
if kwargs.get('rstrip', True):
contents = contents.rstrip()
ret.append(contents)
else:
raise AnsibleParserError()
except AnsibleParserError:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
|
the-stack_106_25829 | # DADSA - Assignment 1
# Reece Benson
from os import system as call
from collections import OrderedDict
class Menu():
# Define the variables we will be using
_app = None
_menu = None
_current_menu = 0
def __init__(self, app):
# Set our Application
self._app = app
def load(self):
# Define our Menu
self._menu = OrderedDict()
# Main Menu
self._menu["main"] = OrderedDict([("New Season", "new_season"), ("Load Season", "load_season")])
# New Season Menu
self._menu["new_season"] = OrderedDict([("Players", "ns_players"), ("Tournaments", "ns_tournaments"), ("Prize Money", "ns_prizemoney"), ("Difficulty", "ns_difficulty")])
# Load Season Menu
self._menu["load_season"] = OrderedDict()
# Append our Seasons to the "Load Season" Menu
for seasonId in self._app.handler.get_seasons():
season = self._app.handler.get_season(seasonId)
self._menu["load_season"].update({ season.name(): "load_season_"+str(seasonId) })
# Display our Menu
self.display()
def display(self, index = None, error = None):
# Clear our terminal window
call("cls")
# Define our variables
cur_count = 0
menu_item = self.get_menu(index or self.get_current_menu_index())
# Error Handling
if(error != None):
print("\n", "Error!", error, "\n")
# Menu Title, set tree
print("Please select an option: {}".format("(/)"))
menu_counter = 0
for m in self._menu[menu_item]:
# Increase our Counter
menu_counter += 1
# Is the Menu Item a Function?
m_type = None
if(callable(m)): m_type = ""
else: m_type = "->"
# Print our Menu Item
print("{0}. {1} {2}".format(menu_counter, m, m_type))
# Get User Input
self.get_input()
def get_current_menu_index(self):
return self._current_menu
def set_current_menu_index(self, new_index):
self._current_menu = new_index
def get_menu_name(self, index):
try:
menu_name = [ (v) for k,v in enumerate(self._menu) if(k == index) ][0]
return menu_name
except IndexError:
return None
def get_menu(self, index):
menu_item = self.get_menu_name(index)
return menu_item
def get_input(self):
# Wrap this in a try/except to validate any errors with input
try:
            # Get users input (read it as text first so command words can be checked)
            resp = input('>>> ')
            # Validate some set input calls
            if(resp == "exit"):
                raise KeyboardInterrupt
            elif(resp == ""):
                return self.display(None, "Please select a valid option!")
            # Attempt index
            new_menu = self.get_menu_name(int(resp))
            self.set_current_menu_index(int(resp))
            self.display()
        except ValueError:
            # Input was not a number (and not a recognised command) - ask again
            self.display(None, "Please select a valid option!")
        except KeyboardInterrupt:
            self._app.exit()
def load_action(self, menu_id):
#TODO: Load Action from Menu_ID
print("Load Action") |
the-stack_106_25831 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the yaml utils module."""
import io
import random
import string
from collections import OrderedDict
from aea.helpers.yaml_utils import (
_AEAYamlLoader,
yaml_dump,
yaml_dump_all,
yaml_load,
yaml_load_all,
)
def test_yaml_dump_load():
"""Test yaml dump/load works."""
data = OrderedDict({"a": 12, "b": None})
stream = io.StringIO()
yaml_dump(data, stream)
stream.seek(0)
loaded_data = yaml_load(stream)
assert loaded_data == data
def test_yaml_dump_all_load_all():
"""Test yaml_dump_all and yaml_load_all."""
f = io.StringIO()
data = [{"a": "12"}, {"b": "13"}]
yaml_dump_all(data, f)
f.seek(0)
assert yaml_load_all(f) == data
def test_instantiate_loader_twice():
"""Test that instantiating the AEA YAML loader twice doesn't add twice implicit resolvers."""
loader = _AEAYamlLoader(io.StringIO())
old_length = len(loader.yaml_implicit_resolvers)
loader = _AEAYamlLoader(io.StringIO())
assert len(loader.yaml_implicit_resolvers) == old_length
def _generate_random_string(n: int = 100):
return "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(n) # nosec
)
|
the-stack_106_25832 | """
testplan.py -
NOTE: this module should not be run as a standalone scripts, excepts for
built-in tests.
"""
# HISTORY ####################################################################
#
# 1 Apr11 MR # initial version
# 2 Jan12 MR # simplification: Configurations are no-more; test plan carries the list of test cases
# 3 Dec14 MR # ported to Py3
#
##############################################################################
__description__ = "TestPlan class implementation"
__version__ = "3"
__author__ = "Miran R."
import json
import io
from pyrus.core.testable import _Testable
from pyrus.core.action import AutomatedAction, NoOpAction, ManualAction, ActionJsonDecoder
from pyrus.core.sut import SystemUnderTest, SutJsonDecoder
from pyrus.core.testcase import TestCase, TestCaseJsonDecoder
from pyrus.core.teststep import TestStep
from functools import reduce
class TestPlan(_Testable):
"""
TestPlan -
"""
def __init__(self, name, setup=None, cleanup=None, cases=None, sut=None):
assert name is not None
super(TestPlan, self).__init__(name, setup, cleanup)
self._cases = cases if cases is not None else []
self._sut = sut if sut is not None else SystemUnderTest("empty SUT")
def __str__(self):
s = "\n".join(("test plan: {}". format(self.name),
" setup={}".format(str(self.setup)),
" cleanup={}".format(str(self.cleanup)),
" cases={}".format([str(s) for s in self.testcases]),
))
return s
@property
def testcases(self):
return self._cases
@property
def systemUnderTest(self):
return self._sut
def addTestCase(self, case):
assert isinstance(case, TestCase)
self._cases.append(case)
def toJson(self):
""" """
return json.dumps(self, cls=_TestPlanJsonEncoder, indent=4)
def toText(self):
""" """
pass
def toXml(self):
""" """
sut = self.systemUnderTest.toXml()
n = """<TestPlan name="{}">""".format(self.name)
s = "<Setup>\n{}</Setup>".format(self.setup.toXml())
c = "<Cleanup>\n{}</Cleanup>".format(self.cleanup.toXml())
cases = reduce(lambda x,y: "\n".join((x, y)),
[case.toXml() for case in self.testcases])
return "\n".join((n, sut, s, c, cases, "</TestPlan>\n"))
def writeJson(self, filename):
""" """
assert filename is not None
fout = open(filename, "w")
json.dump(self, fout, cls=_TestPlanJsonEncoder, indent=4)
fout.close()
class _TestPlanJsonEncoder(json.JSONEncoder):
"""Custom JSON encoder for TestPlan class"""
def default(self, obj):
if isinstance(obj, TestPlan):
d = dict()
d["name"] = obj.name
d["SUT"] = obj.systemUnderTest.toJson()
d["setup"] = obj.setup.toJson()
d["cleanup"] = obj.cleanup.toJson()
d["testcases"] = [i.toJson() for i in obj.testcases]
return d
return json.JSONEncoder.default(self, obj)
class TestPlanJsonDecoder(json.JSONDecoder):
"""Custom JSON decoder for the TestPlan class"""
def decode(self, jsontext):
tsDict = json.loads(jsontext)
#
name = "Untitled Test Plan"
setup = []
cleanup = []
        cases = None
        sut = None
if "name" in tsDict:
name = tsDict["name"]
if "setup" in tsDict:
setup = ActionJsonDecoder().decode(tsDict["setup"])
if "cleanup" in tsDict:
cleanup = ActionJsonDecoder().decode(tsDict["cleanup"])
if "SUT" in tsDict:
sut = SutJsonDecoder().decode(tsDict["SUT"])
if "testcases" in tsDict:
cases=[]
for c in tsDict["testcases"]:
cases.append(TestCaseJsonDecoder().decode(c))
assert cases is not None, "Test plan needs a test case or two..."
        return TestPlan(name, setup, cleanup, cases, sut)
# TESTING ####################################################################
FILENAME = "test/testset.json"
def runtests():
print( "Starting unit tests...")
ts = TestPlan("a testplan ")
print(str(ts))
print(ts.toJson())
print()
# add some setup and cleanup actions
a1 = AutomatedAction("/this/is/a/script/path", "arg1")
a2 = AutomatedAction("/this/is/a/different/script/path", "arg1 arg2")
ts.setup = a1
ts.cleanup = a2
print(str(ts))
print()
# add some test steps
s1 = TestStep("the first step")
s2 = TestStep("the second step")
s3 = TestStep("the third step")
# add some test steps
c1 = TestCase("the first case")
c2 = TestCase("the second case")
c3 = TestCase("the third case")
c1.Setup = a1
c1.Cleanup = a2
c2.Setup = a2
c2.Cleanup = a2
c3.Setup = a1
c3.Cleanup = a1
c1.addStep(s1)
c1.addStep(s2)
c1.addStep(s3)
c2.addStep(s3)
c2.addStep(s2)
c2.addStep(s1)
c3.addStep(s3)
#
ts.addTestCase(c1)
ts.addTestCase(c2)
ts.addTestCase(c3)
ts.addTestCase(c3)
ts.addTestCase(c2)
ts.addTestCase(c1)
ts.addTestCase(c2)
ts.addTestCase(c1)
ts.addTestCase(c3)
print(str(ts))
print()
j = ts.toJson()
print(j)
c = TestPlanJsonDecoder().decode(j)
print("type: {}\ndata='{}'".format(type(c), str(c)))
ts.writeJson(FILENAME)
print("XML={}".format(ts.toXml()))
print("Stop.")
if __name__ == '__main__':
print(__doc__)
runtests()
|
the-stack_106_25834 | from typing import cast
from .. import imaging
from ..pack import PackCollection
from ..ex.language import Language
class IconHelper(object):
ICON_FILE_FORMAT = 'ui/icon/{0:03d}000/{1}{2:06d}.tex'
@staticmethod
def get_icon(pack: PackCollection,
nr: int,
language: Language = None,
type: str = None):
        if language is not None:
            type = language.get_code()
        if type is None:
            type = ''
        if len(type) > 0 and not type.endswith('/'):
            type += '/'
file_path = IconHelper.ICON_FILE_FORMAT.format(int(nr / 1000), type, nr)
file = pack.get_file(file_path)
if file is None and len(type) > 0:
# Couldn't get specific type, try for generic version.
file_path = IconHelper.ICON_FILE_FORMAT.format(int(nr / 1000), '', nr)
file = pack.get_file(file_path)
if file is None:
# Couldn't get generic version either, that's a shame :(
pass
return cast(imaging.ImageFile, file)
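    # Illustrative example (sketch, not from the original source): get_icon(pack, 60001)
    # with no language resolves to 'ui/icon/060000/060001.tex'; if a Language is supplied
    # and, say, language.get_code() returns 'en', the localized path
    # 'ui/icon/060000/en/060001.tex' is tried first, with the generic path as fallback.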
|
the-stack_106_25835 | '''
This code is inspired on https://github.com/jrieke/shape-detection/
'''
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import datetime
import random
# import cairo,math
from skimage.draw import circle, polygon
from tqdm import tqdm
import h5py, os
class HelloWorldDataset:
def __init__(self, num_imgs = 50000, min_object_size = 2, max_object_size = 10,
num_objects = 1, img_size = 16, train_proportion = 0.8,
shape_number=2,allow_overlap=False):
self.num_imgs = num_imgs
self.min_object_size = min_object_size
self.max_object_size = max_object_size
self.num_objects = num_objects
self.WIDTH = img_size #For now this should work only for square.
self.HEIGHT = img_size
self.img_size = img_size
if not (self.img_size == self.WIDTH == self.HEIGHT):
raise Exception('For now, we support only squared images.')
self.train_proportion = train_proportion
self.test_bboxes = None
if shape_number > 3:
raise Exception("For now, we support only a maximum of 3 shapes.")
self.shape_number = shape_number
self.shape_labels = ['rectangle','circle','triangle']
self.shape_labels_colors = ['g','y','c']
self.allow_overlap = allow_overlap
def save_dataset(self,dataset_path=None, imgs=[],y=[]):
if not dataset_path or len(imgs)==0 or len(y)==0:
raise Exception('Missing parameters.')
os.makedirs(dataset_path, exist_ok=True)
imgs_file_path = "{}/{}_imgs.hdf5".format(dataset_path,self.get_dataset_name())
y_file_path = "{}/{}_y.hdf5".format(dataset_path,self.get_dataset_name())
with h5py.File(imgs_file_path, "w") as imgs_f:
imgs_dataset = imgs_f.create_dataset("imgs", (self.num_imgs, self.WIDTH, self.HEIGHT) , dtype='f',data=imgs)
with h5py.File(y_file_path, "w") as y_f:
y_dataset = y_f.create_dataset("y", (self.num_imgs, self.num_objects, 5), dtype='f', data=y)
def load_or_generate(self,dataset_path=None,save=True):
if not dataset_path:
raise Exception('The dataset_path must be provided.')
print('Trying to load dataset...')
imgs_file_path = "{}/{}_imgs.hdf5".format(dataset_path,self.get_dataset_name())
y_file_path = "{}/{}_y.hdf5".format(dataset_path,self.get_dataset_name())
if os.path.exists(imgs_file_path) and os.path.exists(y_file_path):
with h5py.File(imgs_file_path, "r") as imgs_f:
# imgs_dataset = imgs_f.create_dataset("imgs", (self.num_imgs, self.WIDTH, self.HEIGHT), dtype='f')
# self.imgs = np.zeros((self.num_imgs, self.WIDTH, self.HEIGHT),dtype=np.double)
# imgs_dataset.read_direct(self.imgs)
self.imgs = np.array(imgs_f['imgs'])
with h5py.File(y_file_path, "r") as y_f:
# y_dataset = y_f.create_dataset("y", (self.num_imgs, self.num_objects, 5), dtype='f')
# self.y = np.zeros((self.num_imgs, self.num_objects, 5))
# y_dataset.read_direct(self.y)
self.y = np.array(y_f['y'])
return self.split_dataset()#train_X, train_y, test_X, test_y
else:
            print('The dataset has not been found on disk. Generating it...')
return self.generate(save_path=dataset_path) #train_X, train_y, test_X, test_y
def generate(self,save_path=None):
print('Generating the dataset...')
self.y = np.zeros((self.num_imgs, self.num_objects, 5)) #one for the class
self.imgs = np.zeros((self.num_imgs, self.WIDTH, self.HEIGHT),dtype=np.double)
for i_img in tqdm(range(self.num_imgs)):
has_overlap = True
#Through brute force we are going to generate only objects with low overlap.
while has_overlap:
#reset data
self.y[i_img,:,:] = .0
self.imgs[i_img,:,:] = .0
#TODO : randomize the number of objects in each image
for i_object in range(self.num_objects):
shape = np.random.randint(self.shape_number)
if shape == 0: # rectangle
w, h = np.random.randint(self.min_object_size, self.max_object_size, size=2)
x = np.random.randint(0, self.img_size - w)
y = np.random.randint(0, self.img_size - h)
rect_vertices = np.array((
(y,x),
(y,x+w),
(y+h,x+w),
(y+h,x),
(y,x)
))
rr, cc = polygon(rect_vertices[:, 0], rect_vertices[:, 1],(self.img_size,self.img_size))
yolo_bbox = self.from_default_to_yolo([x, y, w, h])
self.validate_bbox(yolo_bbox,annot_type='yolo')
self.y[i_img, i_object] = np.concatenate((yolo_bbox,[shape]))
self.imgs[i_img,rr,cc] = 1
elif shape == 1: # circle
d = np.random.randint(8, self.max_object_size) #diameter
r = int(0.5 * d) # radius
x = np.random.randint(r, self.img_size - d)
y = np.random.randint(r, self.img_size - d)
w = d
h = d
yolo_bbox = self.from_default_to_yolo([x, y, w, h])
self.validate_bbox(yolo_bbox,annot_type='yolo')
denormalized_yolo_bbox = self.denormalize(yolo_bbox)
rr, cc = circle(denormalized_yolo_bbox[1],denormalized_yolo_bbox[0], r,(self.img_size,self.img_size))
self.y[i_img, i_object] = np.concatenate((yolo_bbox,[shape]))
self.imgs[i_img,rr,cc] = 1
elif shape == 2: # triangle
size = np.random.randint(3, self.max_object_size)
x = np.random.randint(0, self.img_size - size)
y = np.random.randint(0, self.img_size - size)
triangle_vertices = np.array((
(y,x),
(y,x+size),
(y+size, x),
(y,x)
))
rr, cc = polygon(triangle_vertices[:, 0], triangle_vertices[:, 1],(self.img_size,self.img_size))
yolo_bbox = self.from_default_to_yolo([x, y, size, size])
self.validate_bbox(yolo_bbox,annot_type='yolo')
self.y[i_img, i_object] = np.concatenate((yolo_bbox,[shape]))
self.imgs[i_img,rr,cc] = 1
accumulated_iou = 0
for i_object_compare in range(self.num_objects):
for i_object_other in range(self.num_objects):
if i_object_other == i_object_compare:
#do not compare the same object.
continue
accumulated_iou += self.bbox_iou_centered(
self.denormalize(self.y[i_img][i_object_compare]),
self.denormalize(self.y[i_img][i_object_other]))
if self.allow_overlap:
has_overlap = False
else:
has_overlap = True if accumulated_iou > 0.0 else False
print("Shapes: imgs ", self.imgs.shape)
print('Dataset: y shape', self.y.shape)
if save_path:
self.save_dataset(dataset_path=save_path, imgs=self.imgs,y=self.y)
return self.split_dataset() #train_X, train_y, test_X, test_y
def split_dataset(self):
# Split training and test.
i = int(self.train_proportion * self.num_imgs)
train_X = self.imgs[:i] #80% for training
test_X = self.imgs[i:]
train_y = self.y[:i]
test_y = self.y[i:]
self.test_imgs = self.imgs[i:]
self.test_bboxes = self.y[i:]
return train_X, train_y, test_X, test_y
def get_dataset_name(self):
return "dataset_{}{}{}{}{}{}{}{}".format(self.num_imgs,self.min_object_size,self.max_object_size,self.num_objects,self.img_size,self.train_proportion,self.shape_number,self.allow_overlap)
def generate_cairo(self):
raise Exception('This generates images with dtype=np.uint8 which is incompatible with tensorflow operations')
self.y = np.zeros((self.num_imgs, self.num_objects, 5)) #one for the class
self.imgs = np.zeros((self.num_imgs, self.WIDTH, self.HEIGHT,4),dtype=np.uint8)
for i_img in range(self.num_imgs):
has_overlap = True
#Through brute force we are going to generate only objects with low overlap.
while has_overlap:
surface = cairo.ImageSurface.create_for_data(self.imgs[i_img], cairo.FORMAT_ARGB32, self.img_size, self.img_size)
cr = cairo.Context(surface)
# Fill background white.
cr.set_source_rgb(1, 1, 1)
cr.paint()
#TODO : randomize the number of objects in each image
# for i_object in range(np.random.randint(self.num_objects)+1):
for i_object in range(self.num_objects):
shape = np.random.randint(self.shape_number)
if shape == 0: # rectangle
w, h = np.random.randint(self.min_object_size, self.max_object_size, size=2)
x = np.random.randint(0, self.img_size - w)
y = np.random.randint(0, self.img_size - h)
cr.rectangle(x, y, w, h)
yolo_bbox = self.from_default_to_yolo([x, y, w, h])
self.validate_bbox(yolo_bbox,annot_type='yolo')
self.y[i_img, i_object] = np.concatenate((yolo_bbox,[shape]))
elif shape == 1: # circle
r = int(0.5 * np.random.randint(4, self.max_object_size))
x = np.random.randint(r, self.img_size - r)
y = np.random.randint(r, self.img_size - r)
cr.arc(x, y, r, 0, 2*np.pi)
x = x - r
y = y - r
w = 2 * r
h = w
yolo_bbox = self.from_default_to_yolo([x, y, w, h])
self.validate_bbox(yolo_bbox,annot_type='yolo')
self.y[i_img, i_object] = np.concatenate((yolo_bbox,[shape]))
elif shape == 2: # triangle
size = np.random.randint(3, self.max_object_size)
x = np.random.randint(0, self.img_size - size)
y = np.random.randint(0, self.img_size - size)
cr.move_to(x, y)
cr.line_to(x+size, y)
cr.line_to(x+size, y+size)
cr.line_to(x, y)
cr.close_path()
yolo_bbox = self.from_default_to_yolo([x, y, size, size])
self.validate_bbox(yolo_bbox,annot_type='yolo')
self.y[i_img, i_object] = np.concatenate((yolo_bbox,[shape]))
cr.set_source_rgb(0,0,0)
cr.fill()
accumulated_iou = 0
for i_object_compare in range(self.num_objects):
for i_object_other in range(self.num_objects):
if i_object_other == i_object_compare:
#do not compare the same object.
continue
accumulated_iou += self.bbox_iou_centered(self.y[i_img][i_object_compare],self.y[i_img][i_object_other])
has_overlap = True if accumulated_iou > 0.2 else False
self.imgs = self.imgs[..., 2::-1] # change to RGB
print("Shapes: imgs ", self.imgs.shape)
print('Dataset: y shape', self.y.shape)
# Split training and test.
i = int(self.train_proportion * self.num_imgs)
train_X = self.imgs[:i] #80% for training
test_X = self.imgs[i:]
train_y = self.y[:i]
test_y = self.y[i:]
self.test_imgs = self.imgs[i:]
self.test_bboxes = self.y[i:]
print('inside the generated',self.y[0],test_y[0],train_y[0])
return train_X, train_y, test_X, test_y
def generate_old(self):
print('Generating...')
self.y = np.zeros((self.num_imgs, self.num_objects, 5)) #one for the class
self.imgs = np.zeros((self.num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0
# self.shapes = np.zeros((self.num_imgs, self.num_objects), dtype=int)
for i_img in range(self.num_imgs):
for i_object in range(self.num_objects):
shape = np.random.randint(self.shape_number)
if shape == 0:
w, h = np.random.randint(self.min_object_size, self.max_object_size, size=2)
x = np.random.randint(0, self.WIDTH - w)
y = np.random.randint(0, self.HEIGHT - h)
self.imgs[i_img, y:y+h, x:x+w] = 1. # set rectangle to 1
x_center = x + int(w/2)
y_center = y + int(h/2)
coords = np.array([x_center, y_center, w, h]) / self.img_size
print('->',[x_center, y_center, w, h],coords)
self.y[i_img, i_object] = np.concatenate((coords,[shape]))
elif shape == 1:
size = np.random.randint(self.min_object_size, self.max_object_size)
x = np.random.randint(0, self.WIDTH - size)
y = np.random.randint(0, self.HEIGHT - size)
mask = np.tril_indices(size)
self.imgs[i_img, y + mask[0], x + mask[1]] = 1.
x_center = x + int(size/2)
y_center = y + int(size/2)
coords = np.array([x_center, y_center, size, size]) / self.img_size
self.y[i_img, i_object] = np.concatenate((coords,[shape]))
else:
raise Exception("Unsupported requested shape quantity.")
print("Shapes: imgs ", self.imgs.shape, " bboxes ", self.y.shape)
print('Dataset: y shape', self.y.shape)
# Split training and test.
i = int(self.train_proportion * self.num_imgs)
train_X = self.imgs[:i] #80% for training
test_X = self.imgs[i:]
train_y = self.y[:i]
test_y = self.y[i:]
self.test_imgs = self.imgs[i:]
self.test_bboxes = self.y[i:]
print('inside the generated',self.y[0],test_y[0],train_y[0])
return train_X, train_y, test_X, test_y
def bbox_iou_centered(self,boxA,boxB):
A_x1, A_y1, A_w, A_h = boxA[0], boxA[1], boxA[2], boxA[3]
A_x1 = A_x1 - int(A_w/2)
A_y1 = A_y1 - int(A_h/2)
B_x1, B_y1, B_w, B_h = boxB[0], boxB[1], boxB[2], boxB[3]
B_x1 = B_x1 - int(B_w/2)
B_y1 = B_y1 - int(B_h/2)
# print(A_x1,A_y1)
return self.bbox_iou([A_x1, A_y1, A_w, A_h],[B_x1, B_y1, B_w, B_h])
def bbox_iou(self,boxA, boxB):
#From: https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
# A) x1,y2,w,h
A_x1, A_y1, A_w, A_h = boxA[0], boxA[1], boxA[2], boxA[3]
# A) x2,y2
A_x2, A_y2 = A_x1 + A_w - 1, A_y1 + A_h - 1
# B) x1,y2,w,h
B_x1, B_y1, B_w, B_h = boxB[0], boxB[1], boxB[2], boxB[3]
# B) x2,y2
B_x2, B_y2 = B_x1 + B_w - 1, B_y1 + B_h - 1
xA = max(A_x1, B_x1)
yA = max(A_y1, B_y1)
xB = min(A_x2, B_x2)
yB = min(A_y2, B_y2)
interArea = max(0,(xB - xA + 1)) * max(0,(yB - yA + 1))
boxAArea = (A_x2 - A_x1 + 1) * (A_y2 - A_y1 + 1)
boxBArea = (B_x2 - B_x1 + 1) * (B_y2 - B_y1 + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
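    # Worked example for bbox_iou (illustrative, not part of the original code):
    # boxA = [0, 0, 4, 4] and boxB = [2, 2, 4, 4] (top-left x, y, width, height)
    # overlap in a 2x2 region, so the IoU is 4 / (16 + 16 - 4) = 1/7, about 0.143.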
def from_default_to_yolo(self,annot):
'''
from
not normalized : topleft_x,topleft_y,width,height
to
normalized : center_x,center_y,width,height
'''
topleft_x,topleft_y,width,height = annot[0],annot[1],annot[2],annot[3]
# print('topleft_x,topleft_y,width,height',topleft_x,topleft_y,width,height)
center_x = topleft_x + int(width/2)
center_y = topleft_y + int(height/2)
return np.array([center_x,center_y,width,height]) / self.img_size # normalize
def from_yolo_to_default(self,annot):
'''
from
normalized : center_x,center_y,width,height
to
not normalized : topleft_x,topleft_y,width,height
'''
# Be aware the class (annot[4]) has been messed up with this denormalization.
annot = np.multiply(annot, self.img_size) #denormalize
center_x,center_y,width,height = annot[0],annot[1],annot[2],annot[3]
# print('center_x,center_y,width,height',center_x,center_y,width,height)
topleft_x = center_x - int(width/2)
topleft_y = center_y - int(height/2)
return [topleft_x,topleft_y,width,height]
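    # Worked example for the two conversions above (illustrative): with img_size = 16,
    # the default-format bbox [x=2, y=4, w=6, h=8] maps through from_default_to_yolo to
    # the normalized [center_x, center_y, w, h] = [0.3125, 0.5, 0.375, 0.5], and
    # from_yolo_to_default maps that back to [2.0, 4.0, 6.0, 8.0].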
def denormalize(self,annot):
if len(annot) == 5:
bkp_class = annot[4]
annot = np.multiply(annot, self.img_size)
annot[4] = bkp_class
else:
annot = np.multiply(annot, self.img_size)
return annot
def validate_bbox(self,annot,annot_type=None):
if annot_type == 'yolo':
annot = self.from_yolo_to_default(annot)
else:
raise Exception('undefined annot_type')
topleft_x,topleft_y,width,height = annot[0],annot[1],annot[2],annot[3]
if (topleft_x < 0 or topleft_x + width > self.img_size) or (topleft_y < 0 or topleft_y + height > self.img_size) :
print('topleft_x,topleft_y,width,height -> ', topleft_x,topleft_y,width,height)
raise Exception('bbox does not fit to image dimensions')
def convertDefaultAnnotToCoord(self, annot):
raise Exception('Check normalizations')
'''
annot -> [x, y, w, h]
'''
w = annot[2] * self.WIDTH
h = annot[3] * self.HEIGHT
x = annot[0] * self.HEIGHT
y = annot[1] * self.HEIGHT
return [x,y,w,h]
def convertYoloAnnotToCoord(self, yolo_annot):
raise Exception('Check normalizations')
'''
yolo_annot -> [x, y, w, h]
'''
print(yolo_annot,self.WIDTH)
w = yolo_annot[2] * self.WIDTH
h = yolo_annot[3] * self.HEIGHT
x = (yolo_annot[0] * self.WIDTH) - (w/2)
y = (yolo_annot[1] * self.HEIGHT) - (h/2)
return [x,y,w,h]
def show_generated(self):
fig = plt.figure(figsize=(12, 3))
fig.subplots_adjust(top=0.85)
fig.suptitle('Samples from the dataset.')
legend_plotted = False
for i_subplot in range(1, 6):
i_img = i_subplot-1
plt.subplot(1, 5, i_subplot)
# plt.imshow(self.imgs[i_img],cmap=plt.cm.gray)
plt.imshow(self.imgs[i_img],cmap='Greys', interpolation='none', origin='lower', extent=[0, self.img_size, 0, self.img_size])
for i_obj, obj_y in enumerate(self.y[i_img]):
x,y,w,h,gt_class = obj_y[0], obj_y[1], obj_y[2], obj_y[3], int(obj_y[4])
gt_bbox = self.from_yolo_to_default([x,y,w,h])
print('img {} obj {} bbox {} class {}'.format(i_img, i_obj, gt_bbox, gt_class))
# plt.gca().set_ylim(0, self.img_size)
# plt.gca().set_xlim(0, self.img_size)
plt.gca().add_patch(matplotlib.patches.Rectangle((gt_bbox[0], gt_bbox[1]), gt_bbox[2], gt_bbox[3], ec='b', fc='none'))
plt.annotate(
'{}'.format(self.shape_labels[gt_class]),
(gt_bbox[0], gt_bbox[1]+gt_bbox[3]+0.2),
color=self.shape_labels_colors[gt_class])
if not legend_plotted:
legend_plotted = True
plt.gca().legend(['GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)
plt.show()
def plot_rectangle(self, img, bbox):
fig = plt.figure()
fig.suptitle('Plotting rectangle.')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 1, 1)
plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.show()
def show_predicted(self, predictions, gt, show_gt=False):
if len(predictions) == 0:
print('There are no predictions to plot.')
return
fig = plt.figure(figsize=(12, 3))
fig.subplots_adjust(top=0.85)
fig.suptitle('Prediction demonstration. Random samples.')
legend_plotted = False
for i_subplot in range(1, 6):
plt.subplot(1, 5, i_subplot)
plt.imshow(self.test_imgs[i_subplot-1], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.img_size, 0, self.img_size])
if show_gt:
for gt_data in gt[i_subplot-1]:
gt_bbox = self.from_yolo_to_default([gt_data[0], gt_data[1], gt_data[2], gt_data[3]])
plt.gca().add_patch(matplotlib.patches.Rectangle((gt_bbox[0], gt_bbox[1]), gt_bbox[2], gt_bbox[3], ec='b', fc='none', lw=1.0, ls='solid'))
for pred_data in predictions[i_subplot-1]:
x,y,w,h,pred_class = pred_data[0], pred_data[1], pred_data[2], pred_data[3], pred_data[4]
pred_bbox = self.from_yolo_to_default([x,y,w,h])
plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none',lw=1.0,ls='solid'))
plt.annotate('{}'.format(self.shape_labels[int(pred_class)]), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2),
color=self.shape_labels_colors[int(pred_class)])
if not legend_plotted:
legend_plotted = True
plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)
plt.show()
def grv_mean_iou(self,pred,gt):
print('Calculating IOU.')
print('#WARNING: This function needs to be improved. There may be a way to achieve a better iou when relating gt x pred bboxes.')
if len(pred) == 0:
print('There are no predictions to calculate mean_iou.')
return 0.0,[]
pred = np.copy(pred)
gt = np.copy(gt)
UNAVAILABLE_FLAG = 1
pred_discard_control = []
for p_bboxes in pred:
'''
Build a list of zeros according to the number of pred bboxes.
Each image prediction can output a different number of bboxes. That is not good
to work in numpy, mainly because the general shape is undefined.
We put a zero for each predicted bbox, meaning they are available.
Putting a UNAVAILABLE_FLAG in a determinated position means that bbox is unavailable.
'''
pred_discard_control.append(np.zeros((len(p_bboxes))))
iou_scores = [] #average iou per image
# get the gts for every image
for img_index, img_gt_bboxes in enumerate(gt):
img_iou_scores = []
#get the gts for a specific image
for gt_bbox in img_gt_bboxes:
#holds iou scores for all predictions for this image in relation to this gt.
gt_bbox_iou_scores = []
#get the predicitions for the same image
for pred_index, pred_bbox in enumerate(pred[img_index]):
#check availability
if pred_discard_control[img_index][pred_index] == UNAVAILABLE_FLAG:
continue
#calculate the iou of all predictions for this gt.
iou = self.bbox_iou_centered(pred_bbox, gt_bbox)
# print('comparing pred, gt, iou',pred_bbox,gt_bbox,iou)
gt_bbox_iou_scores.append(iou)
# if there are usable predicitions.
if len(gt_bbox_iou_scores) > 0:
# here we find the best predicition for this gt.
# print('gt_bbox_iou_scores',gt_bbox_iou_scores)
best_pred = np.argmax(gt_bbox_iou_scores)
# print('for gt_bbox the best_iou',gt_bbox, gt_bbox_iou_scores[best_pred])
img_iou_scores.append(gt_bbox_iou_scores[best_pred]) #save the best iou for the gt
#Mark as unavailable, so that it cannot be reused for other gts
pred_discard_control[img_index][best_pred] = UNAVAILABLE_FLAG
#now we average the iou scores for this image and save it.
if len(img_iou_scores) > 0:
iou_scores.append(np.average(img_iou_scores))
else:
iou_scores.append(0.0)
return np.average(iou_scores),iou_scores
def grv_mean_iou_old(self,pred,gt):
print('Calculating IOU.')
print('#This function needs to be improved. There may be a way to achieve a better iou when relating gt x pred bboxes.')
_pred = np.copy(pred)
gt = np.copy(gt)
#add an extra column as flag. If the value is different than zero the bbox should not be considered anymore
print('_pred.shape',_pred.shape)
control_column_idx = _pred.shape[2]
DISCARDED_FLAG = 1
pred = np.zeros((_pred.shape[0],_pred.shape[1],control_column_idx+1))
pred[:,:,:-1] = _pred
iou_scores = [] #average iou per image
# get the gts for every image
for img_index, img_gt_bboxes in enumerate(gt):
img_iou_scores = []
#get the gts for a specific image
for gt_bbox in img_gt_bboxes:
#holds iou scores for all predictions for this image in relation to this gt.
gt_bbox_iou_scores = []
#get the predicitions for the same image
for pred_bbox in pred[img_index]:
if pred_bbox[control_column_idx] == DISCARDED_FLAG:
continue
#calculate the iou of all predictions for this gt.
iou = self.bbox_iou_centered(pred_bbox, gt_bbox)
# print('comparing pred, gt, iou',pred_bbox,gt_bbox,iou)
gt_bbox_iou_scores.append(iou)
# if there are usable predicitions.
if len(gt_bbox_iou_scores) > 0:
# here we find the best predicition for this gt.
# print('gt_bbox_iou_scores',gt_bbox_iou_scores)
best_pred = np.argmax(gt_bbox_iou_scores)
# print('for gt_bbox the best_iou',gt_bbox, gt_bbox_iou_scores[best_pred])
img_iou_scores.append(gt_bbox_iou_scores[best_pred]) #save the best iou for the gt
#Mask to discard, so that it cannot be reused for other gts
pred[img_index][best_pred][control_column_idx] = DISCARDED_FLAG
#now we average the iou scores for this image and save it.
iou_scores.append(np.average(img_iou_scores))
return np.average(iou_scores),iou_scores
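# Illustrative usage sketch (not part of the original file; parameter values are
# assumptions chosen only to demonstrate the API):
#
#   ds = HelloWorldDataset(num_imgs=200, img_size=16, num_objects=1, shape_number=2)
#   train_X, train_y, test_X, test_y = ds.generate()
#   ds.show_generated()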
|
the-stack_106_25837 | # NOTE:
# Most of these functions are based off pathsim.py from the torps project,
# but this code is neither reviewed nor endorsed by the torps authors.
# Torps is a relatively straightforward Python port of tor's path selection
# algorithm. The original torps code and licensing information can be
# found at: https://github.com/torps/torps
import random
from stem import Flag
# TODO: docs
# TODO: mention in docs the assumptions made here
# (i.e. primarily that node fprints are guaranteed to be in descriptors)
def nodeUsableWithOther(desc1, status_entry1, desc2, status_entry2):
# return True if test_node is usable in a circuit with node
# check:
# - nodes are not equal
# - nodes are not in same family
# - nodes are not in same /16 subnet
if status_entry1.fingerprint == status_entry2.fingerprint:
return False
if inSameFamily(desc1, status_entry1, desc2, status_entry2):
return False
if inSame16Subnet(status_entry1, status_entry2):
return False
return True
def selectWeightedNode(weighted_nodes):
"""Takes (node,cum_weight) pairs where non-negative cum_weight increases,
ending at 1. Use cum_weights as cumulative probablity to select a node."""
r = random.random()
begin = 0
end = len(weighted_nodes)-1
mid = int((end+begin)/2)
while True:
if r <= weighted_nodes[mid][1]:
if mid == begin:
return weighted_nodes[mid][0]
else:
end = mid
mid = int((end+begin)/2)
else:
if mid == end:
raise ValueError('Weights must sum to 1.')
else:
begin = mid+1
mid = int((end+begin)/2)
def getWeightedNodes(nodes, weights):
"""Takes list of nodes (rel_stats) and weights (as a dict) and outputs
a list of (node, cum_weight) pairs, where cum_weight is the cumulative
probability of the nodes weighted by weights.
"""
# compute total weight
total_weight = sum([float(weights[n]) for n in nodes])
if total_weight == 0:
raise ValueError('Error: Node list has total weight zero.')
# create cumulative weights
weighted_nodes = []
cum_weight = 0
for node in nodes:
cum_weight += weights[node] / total_weight
weighted_nodes.append((node, cum_weight))
return weighted_nodes
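# Illustrative example (not from torps): combining getWeightedNodes and
# selectWeightedNode with made-up consensus weights.
#
#   nodes = ['A', 'B', 'C']
#   weights = {'A': 1.0, 'B': 3.0, 'C': 6.0}
#   weighted = getWeightedNodes(nodes, weights)   # [('A', 0.1), ('B', 0.4), ('C', 1.0)]
#   chosen = selectWeightedNode(weighted)         # 'C' about 60% of the time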
def getPositionWeights(nodes, cons_rel_stats, position, bw_weights,
bwweightscale):
"""Computes the consensus "bandwidth" weighted by position weights."""
weights = {}
bwweightscale = float(bwweightscale)
for node in nodes:
r = cons_rel_stats[node]
bw = float(r.bandwidth)
weight = float(getBwweight(r.flags, position, bw_weights))
weight_scaled = weight / bwweightscale
weights[node] = bw * weight_scaled
return weights
def getBwweight(flags, position, bw_weights):
"""Returns weight to apply to relay's bandwidth for given position.
flags: list of Flag values for relay from a consensus
position: position for which to find selection weight,
one of 'g' for guard, 'm' for middle, and 'e' for exit
bw_weights: bandwidth_weights from NetworkStatusDocumentV3 consensus
"""
if position == 'g':
if (Flag.GUARD in flags) and (Flag.EXIT in flags):
return bw_weights['Wgd']
elif Flag.GUARD in flags:
return bw_weights['Wgg']
elif Flag.EXIT not in flags:
return bw_weights['Wgm']
else:
raise ValueError('Wge weight does not exist.')
elif position == 'm':
if (Flag.GUARD in flags) and (Flag.EXIT in flags):
return bw_weights['Wmd']
elif Flag.GUARD in flags:
return bw_weights['Wmg']
elif Flag.EXIT in flags:
return bw_weights['Wme']
else:
return bw_weights['Wmm']
elif position == 'e':
if (Flag.GUARD in flags) and (Flag.EXIT in flags):
return bw_weights['Wed']
elif Flag.GUARD in flags:
return bw_weights['Weg']
elif Flag.EXIT in flags:
return bw_weights['Wee']
else:
return bw_weights['Wem']
else:
raise ValueError('Unrecognized position: {}.'.format(position))
def inSameFamily(desc1, status_entry1, desc2, status_entry2):
"""Takes list of descriptors and two node fingerprints,
checks if nodes list each other as in the same family."""
fprint1 = status_entry1.fingerprint
fprint2 = status_entry2.fingerprint
family1 = set([i.strip(u'$') for i in desc1.family])
family2 = set([i.strip(u'$') for i in desc2.family])
# True only if both nodes list each other
return (fprint1 in family2) and (fprint2 in family1)
# XXX: what do we do for IPv6?
def inSame16Subnet(status_entry1, status_entry2):
address1 = status_entry1.address
address2 = status_entry2.address
return address1.split('.')[:2] == address2.split('.')[:2]
|
the-stack_106_25838 | #Importing required libraries and dataset
import numpy as np
from nilearn.input_data import MultiNiftiMasker
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.pipeline import Pipeline
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score)
from matplotlib import pyplot as plt
from nilearn.plotting import show
from nilearn import datasets
#Fetching the Dataset
miyawaki_dataset = datasets.fetch_miyawaki2008()
X_random = miyawaki_dataset.func[12:]
X_figure = miyawaki_dataset.func[:12]
y_random = miyawaki_dataset.label[12:]
y_figure = miyawaki_dataset.label[:12]
y_shape = (10, 10)
#Load and mask fMRI data
masker = MultiNiftiMasker(mask_img=miyawaki_dataset.mask, detrend=True, standardize=False)
masker.fit()
X_train = masker.transform(X_random)
X_test = masker.transform(X_figure)
#We load the visual stimuli from csv files
y_train = []
for y in y_random:
    y_train.append(np.reshape(np.loadtxt(y, dtype=int, delimiter=','), (-1,) + y_shape, order='F'))
y_test = []
for y in y_figure:
    y_test.append(np.reshape(np.loadtxt(y, dtype=int, delimiter=','), (-1,) + y_shape, order='F'))
X_train = np.vstack([x[2:] for x in X_train])
y_train = np.vstack([y[:-2] for y in y_train]).astype(float)
X_test = np.vstack([x[2:] for x in X_test])
y_test = np.vstack([y[:-2] for y in y_test]).astype(float)
n_pixels = y_train.shape[1]
n_features = X_train.shape[1]
def flatten(list_of_2d_array):
flattened = []
for array in list_of_2d_array:
flattened.append(array.ravel())
return flattened
#Build the design matrix for multiscale computation
#Matrix is squared, y_rows == y_cols
y_cols = y_shape[1]
#Original data
design_matrix = np.eye(100)
#Example of matrix used for multiscale (sum pixels vertically)
#
# 0.5 *
#
# 1 1 0 0 0 0 0 0 0 0
# 0 1 1 0 0 0 0 0 0 0
# 0 0 1 1 0 0 0 0 0 0
# 0 0 0 1 1 0 0 0 0 0
# 0 0 0 0 1 1 0 0 0 0
# 0 0 0 0 0 1 1 0 0 0
# 0 0 0 0 0 0 1 1 0 0
# 0 0 0 0 0 0 0 1 1 0
# 0 0 0 0 0 0 0 0 1 1
height_tf = (np.eye(y_cols) + np.eye(y_cols, k=1))[:y_cols - 1] * .5
width_tf = height_tf.T
yt_tall = [np.dot(height_tf, m) for m in y_train]
yt_large = [np.dot(m, width_tf) for m in y_train]
yt_big = [np.dot(height_tf, np.dot(m, width_tf)) for m in y_train]
#Add it to the training set
y_train = [np.r_[y.ravel(), t.ravel(), l.ravel(), b.ravel()] for y, t, l, b in zip(y_train, yt_tall, yt_large, yt_big)]
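# Shape check (illustrative note): with y_shape = (10, 10), height_tf is (9, 10) and
# width_tf is (10, 9), so each yt_tall entry is (9, 10), each yt_large entry is (10, 9)
# and each yt_big entry is (9, 9); the concatenated multiscale target per stimulus
# therefore has 100 + 90 + 90 + 81 = 361 values.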
y_test = np.asarray(flatten(y_test))
y_train = np.asarray(y_train)
#Remove rest period
X_train = X_train[y_train[:, 0] != -1]
y_train = y_train[y_train[:, 0] != -1]
X_test = X_test[y_test[:, 0] != -1]
y_test = y_test[y_test[:, 0] != -1]
#We define our Prediction function
#Create as many OMP(OrthogonalMatchingPursuit) as voxels to predict
clfs = []
n_clfs = y_train.shape[1]
for i in range(y_train.shape[1]):
clf = Pipeline([('selection', SelectKBest(f_classif, 500)), ('clf', OMP(n_nonzero_coefs=10))])
clf.fit(X_train, y_train[:, i])
clfs.append(clf)
#Run the prediction function
y_pred = []
for clf in clfs:
y_pred.append(clf.predict(X_test))
y_pred = np.asarray(y_pred).T
# We need to do the multi-scale reconstruction
def split_multi_scale(y, y_shape):
#Split data into 4 original multi_scale images
yw, yh = y_shape
#Index of original image
split_index = [yw * yh]
#Index of large image
split_index.append(split_index[-1] + (yw - 1) * yh)
#Index of tall image
split_index.append(split_index[-1] + yw * (yh - 1))
#Index of big image
split_index.append(split_index[-1] + (yw - 1) * (yh - 1))
#We split according to computed indices
y_preds = np.split(y, split_index, axis=1)
#y_pred is the original image
y_pred = y_preds[0]
#y_pred_tall is the image with 1x2 patch application. We have to make
#some calculus to get it back in original shape
height_tf_i = (np.eye(y_cols) + np.eye(y_cols, k=-1))[:, :y_cols - 1] * .5
height_tf_i.flat[0] = 1
height_tf_i.flat[-1] = 1
y_pred_tall = [np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()
for m in y_preds[1]]
y_pred_tall = np.asarray(y_pred_tall)
#y_pred_large is the image with 2x1 patch application. We have to make
#some calculus to get it back in original shape
width_tf_i = (np.eye(y_cols) + np.eye(y_cols, k=1))[:y_cols - 1] * .5
width_tf_i.flat[0] = 1
width_tf_i.flat[-1] = 1
y_pred_large = [np.dot(np.reshape(m, (yw, yh - 1)), width_tf_i).flatten()
for m in y_preds[2]]
y_pred_large = np.asarray(y_pred_large)
#y_pred_big is the image with 2x2 patch application. We use previous
#matrices to get it back in original shape
y_pred_big = [np.dot(np.reshape(m, (yw - 1, yh - 1)), width_tf_i)
for m in y_preds[3]]
y_pred_big = [np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()
for m in y_pred_big]
y_pred_big = np.asarray(y_pred_big)
return (y_pred, y_pred_tall, y_pred_large, y_pred_big)
y_pred, y_pred_tall, y_pred_large, y_pred_big = split_multi_scale(y_pred, y_shape)
y_pred = (.25 * y_pred + .25 * y_pred_tall + .25 * y_pred_large + .25 * y_pred_big)
#Check the Scores of the model
print("Scores")
print("------")
print(" - Accuracy (percent): %f" % np.mean([
accuracy_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
print(" - Precision: %f" % np.mean([
precision_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
print(" - Recall: %f" % np.mean([
recall_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
print(" - F1-score: %f" % np.mean([
f1_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
#Finally we plot the Images
for i in range(6):
j = 10 * i
fig = plt.figure()
sp1 = plt.subplot(131)
sp1.axis('off')
plt.title('Stimulus')
sp2 = plt.subplot(132)
sp2.axis('off')
plt.title('Reconstruction')
sp3 = plt.subplot(133)
sp3.axis('off')
plt.title('Binarized')
sp1.imshow(np.reshape(y_test[j], (10, 10)), cmap=plt.cm.gray,
interpolation='nearest'),
sp2.imshow(np.reshape(y_pred[j], (10, 10)), cmap=plt.cm.gray,
interpolation='nearest'),
sp3.imshow(np.reshape(y_pred[j] > .5, (10, 10)), cmap=plt.cm.gray,
interpolation='nearest')
show() |
the-stack_106_25840 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import concurrent.futures
import time
import pytest
from botocore.session import get_session
from graph_notebook.neptune.client import Client
from test.integration import DataDrivenSparqlTest
def long_running_sparql_query(c: Client, query: str):
res = c.sparql(query)
return res
class TestSparqlStatusWithIam(DataDrivenSparqlTest):
def setUp(self) -> None:
super().setUp()
self.client = self.client_builder.with_iam(get_session()).build()
@pytest.mark.iam
@pytest.mark.neptune
def test_do_sparql_status_nonexistent(self):
query_id = "invalid-guid"
status_res = self.client.sparql_status(query_id)
assert status_res.status_code == 200
assert status_res.content == b''
@pytest.mark.iam
@pytest.mark.neptune
def test_do_sparql_cancel_nonexistent(self):
query_id = "invalid-guid"
cancel_res = self.client.sparql_cancel(query_id)
assert cancel_res.status_code == 400
assert cancel_res.content == b'Invalid queryId (not a UUID): invalid-guid'
@pytest.mark.iam
@pytest.mark.neptune
def test_do_sparql_cancel_empty_query_id(self):
with pytest.raises(ValueError):
self.client.sparql_cancel('')
@pytest.mark.iam
@pytest.mark.neptune
def test_do_sparql_cancel_non_str_query_id(self):
with pytest.raises(ValueError):
self.client.sparql_cancel(42)
@pytest.mark.iam
@pytest.mark.neptune
def test_do_sparql_status_and_cancel(self):
query = "SELECT * WHERE { ?s ?p ?o . ?s2 ?p2 ?o2 .?s3 ?p3 ?o3 . ?s4 ?s5 ?s6 .} ORDER BY DESC(?s)"
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(long_running_sparql_query, self.client, query)
time.sleep(1)
status = self.client.sparql_status()
status_res = status.json()
assert 'acceptedQueryCount' in status_res
assert 'runningQueryCount' in status_res
assert 'queries' in status_res
time.sleep(1)
query_id = ''
for q in status_res['queries']:
if query in q['queryString']:
query_id = q['queryId']
self.assertNotEqual(query_id, '')
cancel = self.client.sparql_cancel(query_id, False)
cancel_res = cancel.json()
assert 'acceptedQueryCount' in cancel_res
            assert 'acceptedQueryCount' in cancel_res
assert 'queries' in cancel_res
res = future.result()
assert res.status_code == 500
raw = res.json()
assert raw['code'] == 'CancelledByUserException'
assert raw['detailedMessage'] == 'Operation terminated (cancelled by user)'
@pytest.mark.iam
@pytest.mark.neptune
def test_do_sparql_status_and_cancel_silently(self):
query = "SELECT * WHERE { ?s ?p ?o . ?s2 ?p2 ?o2 .?s3 ?p3 ?o3 . ?s4 ?s5 ?s6 .} ORDER BY DESC(?s)"
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(long_running_sparql_query, self.client, query)
time.sleep(1)
status = self.client.sparql_status()
status_res = status.json()
assert 'acceptedQueryCount' in status_res
assert 'runningQueryCount' in status_res
assert 'queries' in status_res
time.sleep(1)
query_id = ''
for q in status_res['queries']:
if query in q['queryString']:
query_id = q['queryId']
assert query_id != ''
cancel = self.client.sparql_cancel(query_id, True)
cancel_res = cancel.json()
assert 'acceptedQueryCount' in cancel_res
assert 'runningQueryCount' in cancel_res
assert 'queries' in cancel_res
res = future.result()
query_res = res.json()
assert type(query_res) is dict
assert 's3' in query_res['head']['vars']
assert 'p3' in query_res['head']['vars']
assert 'o3' in query_res['head']['vars']
assert [] == query_res['results']['bindings']
|
the-stack_106_25842 | # Import relevant packages
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# Utility function
def u_func(h, c, par):
"""
Cobb-Douglas utility function for consumption and housing quality
Args:
h (float): housing quality and equal to housing price
c (float): other consumption
par: simplenamespace containing relevant parameters
phi (float): C-D weights
            epsilon (float): public housing assessment factor
r (float): mortgage interest
tau_g (float): base housing tax
tau_p (float): progressive housing tax
p_bar (float): cutoff price
m (float): cash-on-hand
Returns:
(float): utility
"""
return c**(1-par.phi)*h**par.phi
# Optimize function
def u_optimize(par):
"""
Optimises u_func with respect to housing quality and finds housing quality and consumption at the optimum
Args:
h (float): housing quality and equal to housing price
par: simplenamespace containing relevant parameters
phi (float): C-D weights
epsilon (float): public housing assement factor
r (float): mortgage interest
tau_g (float): base housing tax
tau_p (float): progressive housing tax
p_bar (float): cutoff price
m (float): cash-on-hand
Local variables:
        p_thilde (float): public housing assessment price
tax (float): interest rates and tax paid as a function of housing quality
c (float): other consumption
Returns:
h_star (float): optimal housing quality
c_star (float): optimal consumption
u_star (float): utility in optimum
"""
def objective(h, par):
# Use monotonicity to find c as a function of h
p_thilde = h * par.epsilon
tax = par.r * h + par.tau_g * p_thilde + par.tau_p * max(p_thilde-par.p_bar, 0)
c = par.m - tax
return -u_func(h, c, par)
res = optimize.minimize_scalar(objective, method ='brent', args = (par))
# Get optimal h, using monotonicity to find optimal c, then using u_func to find utility in optimum
h_star = res.x
p_thilde = h_star * par.epsilon
tax = par.r * h_star + par.tau_g * p_thilde + par.tau_p * max(p_thilde-par.p_bar, 0)
c_star = par.m - tax
u_star = u_func(h_star, c_star, par)
return h_star, c_star, u_star
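# Illustrative sketch (not part of the original assignment; the parameter values are
# assumptions chosen only to show the expected call):
#
#   from types import SimpleNamespace
#   par = SimpleNamespace(phi=0.3, epsilon=0.5, r=0.03, tau_g=0.012,
#                         tau_p=0.004, p_bar=3.0, m=0.5)
#   h_star, c_star, u_star = u_optimize(par)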
# Plot function
def two_figures(x_left, y_left, title_left, xlabel_left, ylabel_left, x_right, y_right, title_right, xlabel_right, ylabel_right, grid=True):
"""
Plots two aligned figures.
Args: should be self explanatory...
Returns: Two figures in 2D
"""
# a. initialise figure
fig = plt.figure(figsize=(10,4))# figsize is in inches...
# b. left plot
ax_left = fig.add_subplot(1,2,1)
ax_left.plot(x_left,y_left)
ax_left.set_title(title_left)
ax_left.set_xlabel(xlabel_left)
ax_left.set_ylabel(ylabel_left)
ax_left.grid(grid)
# c. right plot
ax_right = fig.add_subplot(1,2,2)
ax_right.plot(x_right, y_right)
ax_right.set_title(title_right)
ax_right.set_xlabel(xlabel_right)
ax_right.set_ylabel(ylabel_right)
ax_right.grid(grid)
# Tax revenue function
def tax_total(par):
"""
Finds total tax burden in a log normal distributed population
Args:
par: simplenamespace containing relevant parameters
phi (float): C-D weights
            epsilon (float): public housing assessment factor
r (float): mortgage interest
tau_g (float): base housing tax
tau_p (float): progressive housing tax
p_bar (float): cutoff price
m (float): cash-on-hand
seed (int): seed number for random draws
mu (float): mean value for the distribution
sigma (float): standard deviation for the distribution
Local variables:
h_cit (float): housing quality choice of one citizen in the population
c_cit (float): other consumption choice of one citizen in the population
        u_cit (float): utility for one citizen in the population given the choice of h and c
Returns:
T (float): total tax burden
"""
# Set seed and tax = 0
np.random.seed(par.seed)
T = 0
# Loop through every citizen in the population and calculate optimal choices
# and tax given those choices
for i in range(par.pop):
par.m = np.random.lognormal(par.mu, par.sigma)
h_cit, c_cit, u_cit = u_optimize(par)
T += par.tau_g*(h_cit*par.epsilon) + par.tau_p*max((par.epsilon*h_cit)-par.p_bar, 0)
return T
# Base tax percentage function
def base_tax_pct(par):
"""
Finds optimal base tax percentage for tax reform given the tax burden before the reform
Uses root optimisation
Args:
tau_g (float): base tax level
par: simplenamespace containing relevant parameters
phi (float): C-D weights
        epsilon (float): public housing assessment factor
r (float): mortgage interest
tau_g (float): base housing tax
tau_p (float): progressive housing tax
p_bar (float): cutoff price
m (float): cash-on-hand
seed (int): seed number for random draws
mu (float): mean value for the distribution
sigma (float): standard deviation for the distribution
T_goal (float): level of tax burden the policy maker wants to hit
Returns:
tau (float): base tax percentage
"""
def obj(tau_g, par):
par.tau_g = tau_g
return tax_total(par) - par.T_goal
    sol = optimize.root(obj, 0.01, args=(par,))
    tau = float(sol.x)
    return tau
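# Illustrative usage sketch (not part of the original file): the parameter values below
# follow the usual setup for this kind of housing-tax exercise but are assumptions here,
# not values taken from the source; tune them as needed.
if __name__ == "__main__":
    from types import SimpleNamespace
    par = SimpleNamespace(
        phi=0.3, epsilon=0.5, r=0.03, tau_g=0.012, tau_p=0.004, p_bar=3.0, m=0.5,
        seed=1, mu=-0.4, sigma=0.35, pop=500,
    )
    h_star, c_star, u_star = u_optimize(par)
    print(f"optimal housing {h_star:.3f}, consumption {c_star:.3f}, utility {u_star:.3f}")
    T_before = tax_total(par)
    print(f"total tax revenue over the population: {T_before:.3f}")
    # Expensive: the root finder re-runs the whole population simulation per trial tau_g.
    par.T_goal = T_before
    print(f"base tax rate that reproduces current revenue: {base_tax_pct(par):.4f}")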
the-stack_106_25843
from typing_extensions import Final
import numpy as np
import torch
from torch import nn
from typing import Any, Dict, Optional, Union
from ptgnn.baseneuralmodel import AbstractNeuralModel
from ptgnn.baseneuralmodel.utils.data import enforce_not_None
from ptgnn.neuralmodels.gnn.structs import AbstractNodeEmbedder
class LinearFeatureEmbedder(nn.Module):
def __init__(
self,
input_element_size: int,
output_embedding_size: int,
activation: Optional[nn.Module] = None,
):
super().__init__()
self.__linear_map = nn.Linear(input_element_size, output_embedding_size, bias=False)
        torch.nn.init.xavier_uniform_(self.__linear_map.weight)
self.__activation = activation
def forward(self, features):
mapped_features = self.__linear_map(features)
if self.__activation is not None:
mapped_features = self.__activation(mapped_features)
return mapped_features
class FeatureRepresentationModel(
AbstractNeuralModel[np.ndarray, np.ndarray, LinearFeatureEmbedder],
AbstractNodeEmbedder,
):
"""
A model that maps a feature array to a D-sized representation (embedding) using a single linear layer.
"""
def __init__(
self,
*,
embedding_size: int = 64,
activation: Optional[nn.Module] = None,
):
super().__init__()
self.embedding_size: Final = embedding_size
self.__activation: Final = activation
def representation_size(self) -> int:
return self.embedding_size
def initialize_metadata(self) -> None:
self.__num_input_features = None
def update_metadata_from(self, datapoint: np.ndarray) -> None:
if self.__num_input_features is None:
self.__num_input_features = datapoint.shape[0]
else:
assert (
self.__num_input_features == datapoint.shape[0]
), "All samples should have the same number of features."
def build_neural_module(self) -> LinearFeatureEmbedder:
return LinearFeatureEmbedder(
input_element_size=enforce_not_None(self.__num_input_features),
output_embedding_size=self.embedding_size,
activation=self.__activation,
)
def tensorize(self, datapoint: np.ndarray) -> np.ndarray:
return datapoint
def initialize_minibatch(self) -> Dict[str, Any]:
return {"features": []}
def extend_minibatch_with(
self, tensorized_datapoint: np.ndarray, partial_minibatch: Dict[str, Any]
) -> bool:
partial_minibatch["features"].append(tensorized_datapoint)
return True
def finalize_minibatch(
self, accumulated_minibatch_data: Dict[str, Any], device: Union[str, torch.device]
) -> Dict[str, Any]:
return {
"features": torch.tensor(
accumulated_minibatch_data["features"], dtype=torch.float32, device=device
)
}
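# Illustrative usage sketch (not part of the original module): exercises the hooks
# defined above directly on random features; a real ptgnn training run would drive
# them through AbstractNeuralModel's public training API instead.
if __name__ == "__main__":
    model = FeatureRepresentationModel(embedding_size=8, activation=nn.Tanh())
    samples = [np.random.rand(5).astype(np.float32) for _ in range(4)]
    model.initialize_metadata()
    for sample in samples:
        model.update_metadata_from(sample)
    embedder = model.build_neural_module()
    minibatch = model.initialize_minibatch()
    for sample in samples:
        model.extend_minibatch_with(model.tensorize(sample), minibatch)
    tensors = model.finalize_minibatch(minibatch, device="cpu")
    print(embedder(tensors["features"]).shape)  # expected: torch.Size([4, 8])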
the-stack_106_25846
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, Union
import numpy
from ..dispatch import DataFrameType, _encode_list_column, _hash_series, _is_list_dtype, annotate
from ..tags import Tags
from .categorify import _emb_sz_rule, _get_embedding_order
from .operator import ColumnSelector, Operator
class HashBucket(Operator):
"""
This op maps categorical columns to a contiguous integer range by first
hashing the column, then reducing modulo the number of buckets.
Example usage::
cat_names = ["feature_a", "feature_b"]
# this will hash both features a and b to 100 buckets
hash_features = cat_names >> ops.HashBucket({"feature_a": 100, "feature_b": 50})
processor = nvtabular.Workflow(hash_features)
The output of this op would be::
feature_a feature_b
0 90 11
1 70 40
2 52 9
If you would like to do frequency capping or frequency hashing,
you should use Categorify op instead. See
`Categorify op <https://github.com/NVIDIA/NVTabular/blob/main/nvtabular/ops/categorify.py#L43>`_
for example usage.
Parameters
----------
num_buckets : int or dictionary:{column: num_hash_buckets}
Column-wise modulo to apply after hash function. Note that this
means that the corresponding value will be the categorical cardinality
of the transformed categorical feature. If given as an int, that value
will be used as the number of "hash buckets" for every feature.
If a dictionary is passed, it will be used to specify
explicit mappings from a column name to a number of buckets. In
this case, only the columns specified in the keys of `num_buckets`
will be transformed.
"""
def __init__(self, num_buckets: Union[int, Dict[str, int]]):
        if isinstance(num_buckets, (dict, int)):
            self.num_buckets = num_buckets
        else:
            raise TypeError(
                "`num_buckets` must be dict or int, got type {}".format(type(num_buckets))
            )
super(HashBucket, self).__init__()
@annotate("HashBucket_op", color="darkgreen", domain="nvt_python")
def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
if isinstance(self.num_buckets, int):
num_buckets = {name: self.num_buckets for name in col_selector.names}
else:
num_buckets = self.num_buckets
for col, nb in num_buckets.items():
if _is_list_dtype(df[col].dtype):
df[col] = _encode_list_column(df[col], _hash_series(df[col]) % nb)
else:
df[col] = _hash_series(df[col]) % nb
return df
transform.__doc__ = Operator.transform.__doc__
def get_embedding_sizes(self, columns):
columns = _get_embedding_order(columns)
if isinstance(self.num_buckets, int):
embedding_size = _emb_sz_rule(self.num_buckets)
return {col: embedding_size for col in columns}
else:
return {col: _emb_sz_rule(self.num_buckets[col]) for col in columns}
def _add_properties(self, column_schema):
cardinality, dimensions = self.get_embedding_sizes([column_schema.name])[column_schema.name]
if cardinality and dimensions:
to_add = {
"domain": {"min": 0, "max": cardinality},
"embedding_sizes": {"cardinality": cardinality, "dimension": dimensions},
}
column_schema = column_schema.with_properties(to_add)
return column_schema
def output_tags(self):
return [Tags.CATEGORICAL]
def _get_dtypes(self):
return numpy.int64
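# Illustrative sketch (not part of NVTabular): the hash-then-modulo idea shown with
# plain pandas. NVTabular's transform() goes through _hash_series, which dispatches to
# cudf/pandas hashing internally, so this is only an approximation of its behaviour.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"feature_a": ["cat", "dog", "fish", "cat"]})
    num_buckets = 100
    hashed = pd.util.hash_pandas_object(df["feature_a"], index=False)
    df["feature_a"] = (hashed % num_buckets).astype("int64")
    print(df)  # identical strings land in the same bucket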
the-stack_106_25848
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from collections.abc import AsyncIterator
import functools
import logging
from typing import Any, Callable, Union, Optional, AsyncIterator as AsyncIteratorType
import trio
import urllib3
import requests
from azure.core.exceptions import (
ServiceRequestError,
ServiceResponseError
)
from azure.core.pipeline import Pipeline
from ._base import HttpRequest
from ._base_async import (
AsyncHttpResponse,
_ResponseStopIteration,
_iterate_response_content)
from ._requests_basic import RequestsTransportResponse
from ._base_requests_async import RequestsAsyncTransportBase
_LOGGER = logging.getLogger(__name__)
class TrioStreamDownloadGenerator(AsyncIterator):
"""Generator for streaming response data.
:param pipeline: The pipeline object
:param response: The response object.
"""
def __init__(self, pipeline: Pipeline, response: AsyncHttpResponse) -> None:
self.pipeline = pipeline
self.request = response.request
self.response = response
self.block_size = response.block_size
self.iter_content_func = self.response.internal_response.iter_content(self.block_size)
self.content_length = int(response.headers.get('Content-Length', 0))
self.downloaded = 0
def __len__(self):
return self.content_length
async def __anext__(self):
try:
try:
chunk = await trio.to_thread.run_sync(
_iterate_response_content,
self.iter_content_func,
)
except AttributeError: # trio < 0.12.1
chunk = await trio.run_sync_in_worker_thread( # pylint: disable=no-member
_iterate_response_content,
self.iter_content_func,
)
if not chunk:
raise _ResponseStopIteration()
return chunk
except _ResponseStopIteration:
self.response.internal_response.close()
raise StopAsyncIteration()
except requests.exceptions.StreamConsumedError:
raise
except Exception as err:
_LOGGER.warning("Unable to stream download: %s", err)
self.response.internal_response.close()
raise
class TrioRequestsTransportResponse(AsyncHttpResponse, RequestsTransportResponse): # type: ignore
"""Asynchronous streaming of data from the response.
"""
def stream_download(self, pipeline) -> AsyncIteratorType[bytes]: # type: ignore
"""Generator for streaming response data.
"""
return TrioStreamDownloadGenerator(pipeline, self)
class TrioRequestsTransport(RequestsAsyncTransportBase): # type: ignore
"""Identical implementation as the synchronous RequestsTransport wrapped in a class with
asynchronous methods. Uses the third party trio event loop.
.. admonition:: Example:
.. literalinclude:: ../samples/test_example_async.py
:start-after: [START trio]
:end-before: [END trio]
:language: python
:dedent: 4
:caption: Asynchronous transport with trio.
"""
async def __aenter__(self):
return super(TrioRequestsTransport, self).__enter__()
async def __aexit__(self, *exc_details): # pylint: disable=arguments-differ
return super(TrioRequestsTransport, self).__exit__()
async def sleep(self, duration): # pylint:disable=invalid-overridden-method
await trio.sleep(duration)
async def send(self, request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: # type: ignore # pylint:disable=invalid-overridden-method
"""Send the request using this HTTP sender.
:param request: The HttpRequest
:type request: ~azure.core.pipeline.transport.HttpRequest
:return: The AsyncHttpResponse
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
:keyword requests.Session session: will override the driver session and use yours.
Should NOT be done unless really required. Anything else is sent straight to requests.
:keyword dict proxies: will define the proxy to use. Proxy is a dict (protocol, url)
"""
self.open()
trio_limiter = kwargs.get("trio_limiter", None)
response = None
error = None # type: Optional[Union[ServiceRequestError, ServiceResponseError]]
data_to_send = await self._retrieve_request_data(request)
try:
try:
response = await trio.to_thread.run_sync(
functools.partial(
self.session.request,
request.method,
request.url,
headers=request.headers,
data=data_to_send,
files=request.files,
verify=kwargs.pop('connection_verify', self.connection_config.verify),
timeout=kwargs.pop('connection_timeout', self.connection_config.timeout),
cert=kwargs.pop('connection_cert', self.connection_config.cert),
allow_redirects=False,
**kwargs),
limiter=trio_limiter)
except AttributeError: # trio < 0.12.1
response = await trio.run_sync_in_worker_thread( # pylint: disable=no-member
functools.partial(
self.session.request,
request.method,
request.url,
headers=request.headers,
data=request.data,
files=request.files,
verify=kwargs.pop('connection_verify', self.connection_config.verify),
timeout=kwargs.pop('connection_timeout', self.connection_config.timeout),
cert=kwargs.pop('connection_cert', self.connection_config.cert),
allow_redirects=False,
**kwargs),
limiter=trio_limiter)
except urllib3.exceptions.NewConnectionError as err:
error = ServiceRequestError(err, error=err)
except requests.exceptions.ReadTimeout as err:
error = ServiceResponseError(err, error=err)
except requests.exceptions.ConnectionError as err:
if err.args and isinstance(err.args[0], urllib3.exceptions.ProtocolError):
error = ServiceResponseError(err, error=err)
else:
error = ServiceRequestError(err, error=err)
except requests.RequestException as err:
error = ServiceRequestError(err, error=err)
if error:
raise error
return TrioRequestsTransportResponse(request, response, self.connection_config.data_block_size)
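# Illustrative usage sketch (not part of the SDK): drives the transport directly with
# trio; production code would normally send requests through an azure.core Pipeline.
# The URL is only an example endpoint.
if __name__ == "__main__":
    async def _demo():
        request = HttpRequest("GET", "https://httpbin.org/get")
        async with TrioRequestsTransport() as transport:
            response = await transport.send(request)
            print(response.status_code)
    trio.run(_demo)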
the-stack_106_25854
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import warnings
from collections import OrderedDict
from functools import partial
from torch.distributions import biject_to, constraints
from torch.nn import Parameter
import pyro
import pyro.distributions as dist
from pyro.distributions.util import eye_like
from pyro.nn.module import PyroModule, PyroParam, PyroSample, pyro_method
def _is_real_support(support):
if isinstance(support, pyro.distributions.constraints.independent):
return _is_real_support(support.base_constraint)
else:
return support in [constraints.real, constraints.real_vector]
def _get_sample_fn(module, name):
if module.mode == "model":
return module._priors[name]
dist_constructor, dist_args = module._guides[name]
if dist_constructor is dist.Delta:
p_map = getattr(module, "{}_map".format(name))
return dist.Delta(p_map, event_dim=p_map.dim())
# create guide
dist_args = {arg: getattr(module, "{}_{}".format(name, arg)) for arg in dist_args}
guide = dist_constructor(**dist_args)
# no need to do transforms when support is real (for mean field ELBO)
support = module._priors[name].support
if _is_real_support(support):
return guide.to_event()
# otherwise, we do inference in unconstrained space and transform the value
# back to original space
# TODO: move this logic to infer.autoguide or somewhere else
unconstrained_value = pyro.sample(
module._pyro_get_fullname("{}_latent".format(name)),
guide.to_event(),
infer={"is_auxiliary": True},
)
transform = biject_to(support)
value = transform(unconstrained_value)
log_density = transform.inv.log_abs_det_jacobian(value, unconstrained_value)
return dist.Delta(value, log_density.sum(), event_dim=value.dim())
class Parameterized(PyroModule):
"""
    A wrapper of :class:`~pyro.nn.module.PyroModule` whose parameters can be given
    constraints and priors.
By default, when we set a prior to a parameter, an auto Delta guide will be
created. We can use the method :meth:`autoguide` to setup other auto guides.
Example::
>>> class Linear(Parameterized):
... def __init__(self, a, b):
... super().__init__()
... self.a = Parameter(a)
... self.b = Parameter(b)
...
... def forward(self, x):
... return self.a * x + self.b
...
>>> linear = Linear(torch.tensor(1.), torch.tensor(0.))
>>> linear.a = PyroParam(torch.tensor(1.), constraints.positive)
>>> linear.b = PyroSample(dist.Normal(0, 1))
>>> linear.autoguide("b", dist.Normal)
>>> assert "a_unconstrained" in dict(linear.named_parameters())
>>> assert "b_loc" in dict(linear.named_parameters())
>>> assert "b_scale_unconstrained" in dict(linear.named_parameters())
Note that by default, data of a parameter is a float :class:`torch.Tensor`
(unless we use :func:`torch.set_default_tensor_type` to change default
tensor type). To cast these parameters to a correct data type or GPU device,
we can call methods such as :meth:`~torch.nn.Module.double` or
:meth:`~torch.nn.Module.cuda`. See :class:`torch.nn.Module` for more
information.
"""
def __init__(self):
super().__init__()
self._priors = OrderedDict()
self._guides = OrderedDict()
self._mode = "model"
def set_prior(self, name, prior):
"""
Sets prior for a parameter.
:param str name: Name of the parameter.
:param ~pyro.distributions.distribution.Distribution prior: A Pyro prior
distribution.
"""
warnings.warn(
"The method `self.set_prior({}, prior)` has been deprecated"
" in favor of `self.{} = PyroSample(prior)`.".format(name, name),
UserWarning,
)
setattr(self, name, PyroSample(prior))
def __setattr__(self, name, value):
if isinstance(value, PyroSample):
prior = value.prior
if hasattr(prior, "sample"):
self._priors[name] = prior
self.autoguide(name, dist.Delta)
value = PyroSample(partial(_get_sample_fn, name=name))
super().__setattr__(name, value)
def autoguide(self, name, dist_constructor):
"""
Sets an autoguide for an existing parameter with name ``name`` (mimic
the behavior of module :mod:`pyro.infer.autoguide`).
.. note:: `dist_constructor` should be one of
:class:`~pyro.distributions.Delta`,
:class:`~pyro.distributions.Normal`, and
:class:`~pyro.distributions.MultivariateNormal`. More distribution
constructor will be supported in the future if needed.
:param str name: Name of the parameter.
:param dist_constructor: A
:class:`~pyro.distributions.distribution.Distribution` constructor.
"""
if name not in self._priors:
raise ValueError("There is no prior for parameter: {}".format(name))
if dist_constructor not in [dist.Delta, dist.Normal, dist.MultivariateNormal]:
raise NotImplementedError(
"Unsupported distribution type: {}".format(dist_constructor)
)
# delete old guide
if name in self._guides:
dist_args = self._guides[name][1]
for arg in dist_args:
delattr(self, "{}_{}".format(name, arg))
p = self._priors[name]() # init_to_sample strategy
if dist_constructor is dist.Delta:
support = self._priors[name].support
if _is_real_support(support):
p_map = Parameter(p.detach())
else:
p_map = PyroParam(p.detach(), support)
setattr(self, "{}_map".format(name), p_map)
dist_args = ("map",)
elif dist_constructor is dist.Normal:
loc = Parameter(biject_to(self._priors[name].support).inv(p).detach())
scale = PyroParam(loc.new_ones(loc.shape), constraints.positive)
setattr(self, "{}_loc".format(name), loc)
setattr(self, "{}_scale".format(name), scale)
dist_args = ("loc", "scale")
elif dist_constructor is dist.MultivariateNormal:
loc = Parameter(biject_to(self._priors[name].support).inv(p).detach())
identity = eye_like(loc, loc.size(-1))
scale_tril = PyroParam(
identity.repeat(loc.shape[:-1] + (1, 1)), constraints.lower_cholesky
)
setattr(self, "{}_loc".format(name), loc)
setattr(self, "{}_scale_tril".format(name), scale_tril)
dist_args = ("loc", "scale_tril")
else:
raise NotImplementedError
self._guides[name] = (dist_constructor, dist_args)
@pyro_method
def _load_pyro_samples(self):
"""
Runs `pyro.sample` primitives for all `PyroSample` attributes.
"""
for module in self.modules():
if "_pyro_samples" in module.__dict__:
for name in module._pyro_samples:
getattr(module, name)
def set_mode(self, mode):
"""
Sets ``mode`` of this object to be able to use its parameters in
stochastic functions. If ``mode="model"``, a parameter will get its
value from its prior. If ``mode="guide"``, the value will be drawn from
its guide.
.. note:: This method automatically sets ``mode`` for submodules which
belong to :class:`Parameterized` class.
:param str mode: Either "model" or "guide".
"""
for module in self.modules():
if isinstance(module, Parameterized):
module.mode = mode
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, mode):
self._mode = mode
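# Illustrative sketch (not part of the original module): mirrors the Linear example in
# the Parameterized docstring and shows how set_mode() switches a PyroSample attribute
# between its prior ("model") and its autoguide ("guide").
if __name__ == "__main__":
    import torch
    class _DemoLinear(Parameterized):
        def __init__(self, a, b):
            super().__init__()
            self.a = Parameter(a)
            self.b = Parameter(b)
        def forward(self, x):
            return self.a * x + self.b
    linear = _DemoLinear(torch.tensor(1.0), torch.tensor(0.0))
    linear.b = PyroSample(dist.Normal(0.0, 1.0))
    linear.autoguide("b", dist.Normal)
    linear.set_mode("model")  # b is drawn from its Normal(0, 1) prior
    print(linear(torch.tensor(2.0)))
    linear.set_mode("guide")  # b is drawn from the (untrained) Normal guide
    print(linear(torch.tensor(2.0)))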
the-stack_106_25855
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import pandas as pd
from scipy.stats import spearmanr, pearsonr
from ..data import D
from collections import OrderedDict
def _get_position_value_from_df(evaluate_date, position, close_data_df):
"""Get position value by existed close data df
close_data_df:
pd.DataFrame
multi-index
close_data_df['$close'][stock_id][evaluate_date]: close price for (stock_id, evaluate_date)
position:
same in get_position_value()
"""
value = 0
for stock_id, report in position.items():
if stock_id != "cash":
value += report["amount"] * close_data_df["$close"][stock_id][evaluate_date]
# value += report['amount'] * report['price']
if "cash" in position:
value += position["cash"]
return value
def get_position_value(evaluate_date, position):
"""sum of close*amount
get value of postion
use close price
postions:
{
Timestamp('2016-01-05 00:00:00'):
{
'SH600022':
{
'amount':100.00,
'price':12.00
},
'cash':100000.0
}
}
It means Hold 100.0 'SH600022' and 100000.0 RMB in '2016-01-05'
"""
# load close price for position
# position should also consider cash
instruments = list(position.keys())
instruments = list(set(instruments) - set(["cash"])) # filter 'cash'
fields = ["$close"]
close_data_df = D.features(
instruments,
fields,
start_time=evaluate_date,
end_time=evaluate_date,
freq="day",
disk_cache=0,
)
value = _get_position_value_from_df(evaluate_date, position, close_data_df)
return value
def get_position_list_value(positions):
    # generate instrument list and date for whole positions
instruments = set()
for day, position in positions.items():
instruments.update(position.keys())
instruments = list(set(instruments) - set(["cash"])) # filter 'cash'
instruments.sort()
day_list = list(positions.keys())
day_list.sort()
start_date, end_date = day_list[0], day_list[-1]
# load data
fields = ["$close"]
close_data_df = D.features(
instruments,
fields,
start_time=start_date,
end_time=end_date,
freq="day",
disk_cache=0,
)
# generate value
# return dict for time:position_value
value_dict = OrderedDict()
for day, position in positions.items():
value = _get_position_value_from_df(evaluate_date=day, position=position, close_data_df=close_data_df)
value_dict[day] = value
return value_dict
def get_daily_return_series_from_positions(positions, init_asset_value):
"""Parameters
generate daily return series from position view
positions: positions generated by strategy
init_asset_value : init asset value
return: pd.Series of daily return , return_series[date] = daily return rate
"""
value_dict = get_position_list_value(positions)
value_series = pd.Series(value_dict)
value_series = value_series.sort_index() # check date
return_series = value_series.pct_change()
return_series[value_series.index[0]] = (
value_series[value_series.index[0]] / init_asset_value - 1
) # update daily return for the first date
return return_series
def get_annual_return_from_positions(positions, init_asset_value):
"""Annualized Returns
p_r = (p_end / p_start)^{(250/n)} - 1
p_r annual return
p_end final value
p_start init value
n days of backtest
"""
date_range_list = sorted(list(positions.keys()))
end_time = date_range_list[-1]
p_end = get_position_value(end_time, positions[end_time])
p_start = init_asset_value
n_period = len(date_range_list)
annual = pow((p_end / p_start), (250 / n_period)) - 1
return annual
def get_annaul_return_from_return_series(r, method="ci"):
"""Risk Analysis from daily return series
Parameters
----------
r : pandas.Series
daily return series
method : str
interest calculation method, ci(compound interest)/si(simple interest)
"""
mean = r.mean()
annual = (1 + mean) ** 250 - 1 if method == "ci" else mean * 250
return annual
def get_sharpe_ratio_from_return_series(r, risk_free_rate=0.00, method="ci"):
"""Risk Analysis
Parameters
----------
r : pandas.Series
daily return series
method : str
interest calculation method, ci(compound interest)/si(simple interest)
risk_free_rate : float
risk_free_rate, default as 0.00, can set as 0.03 etc
"""
std = r.std(ddof=1)
annual = get_annaul_return_from_return_series(r, method=method)
sharpe = (annual - risk_free_rate) / std / np.sqrt(250)
return sharpe
def get_max_drawdown_from_series(r):
"""Risk Analysis from asset value
cumprod way
Parameters
----------
r : pandas.Series
daily return series
"""
# mdd = ((r.cumsum() - r.cumsum().cummax()) / (1 + r.cumsum().cummax())).min()
mdd = (((1 + r).cumprod() - (1 + r).cumprod().cummax()) / ((1 + r).cumprod().cummax())).min()
return mdd
def get_turnover_rate():
# in backtest
pass
def get_beta(r, b):
"""Risk Analysis beta
Parameters
----------
r : pandas.Series
daily return series of strategy
b : pandas.Series
daily return series of baseline
"""
    # np.cov returns the full 2x2 covariance matrix; beta uses the off-diagonal term
    cov_r_b = np.cov(r, b)[0][1]
    var_b = np.var(b)
    return cov_r_b / var_b
def get_alpha(r, b, risk_free_rate=0.03):
beta = get_beta(r, b)
annaul_r = get_annaul_return_from_return_series(r)
annaul_b = get_annaul_return_from_return_series(b)
alpha = annaul_r - risk_free_rate - beta * (annaul_b - risk_free_rate)
return alpha
def get_volatility_from_series(r):
return r.std(ddof=1)
def get_rank_ic(a, b):
"""Rank IC
Parameters
----------
r : pandas.Series
daily score series of feature
b : pandas.Series
daily return series
"""
return spearmanr(a, b).correlation
def get_normal_ic(a, b):
    # pearsonr returns (correlation, p-value); indexing works across SciPy versions
    return pearsonr(a, b)[0]
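# Illustrative sketch (not part of the original module): the risk metrics above on a
# synthetic daily-return series; the numbers are random and purely for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    r = pd.Series(rng.normal(loc=0.0005, scale=0.01, size=250))
    b = pd.Series(rng.normal(loc=0.0003, scale=0.01, size=250))
    print("annual return:", get_annaul_return_from_return_series(r))
    print("sharpe ratio :", get_sharpe_ratio_from_return_series(r))
    print("max drawdown :", get_max_drawdown_from_series(r))
    print("beta         :", get_beta(r, b))
    print("rank IC      :", get_rank_ic(r, b))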
the-stack_106_25856
"""
The methods for loading Home Assistant integrations.
This module has quite some complex parts. I have tried to add as much
documentation as possible to keep it understandable.
"""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import functools as ft
import importlib
import json
import logging
import pathlib
import sys
from types import ModuleType
from typing import TYPE_CHECKING, Any, TypedDict, TypeVar, cast
from awesomeversion import (
AwesomeVersion,
AwesomeVersionException,
AwesomeVersionStrategy,
)
from .generated.dhcp import DHCP
from .generated.mqtt import MQTT
from .generated.ssdp import SSDP
from .generated.usb import USB
from .generated.zeroconf import HOMEKIT, ZEROCONF
from .util.async_ import gather_with_concurrency
# Typing imports that create a circular dependency
if TYPE_CHECKING:
from .core import HomeAssistant
CALLABLE_T = TypeVar( # pylint: disable=invalid-name
"CALLABLE_T", bound=Callable[..., Any]
)
_LOGGER = logging.getLogger(__name__)
DATA_COMPONENTS = "components"
DATA_INTEGRATIONS = "integrations"
DATA_CUSTOM_COMPONENTS = "custom_components"
PACKAGE_CUSTOM_COMPONENTS = "custom_components"
PACKAGE_BUILTIN = "homeassistant.components"
CUSTOM_WARNING = (
"We found a custom integration %s which has not "
"been tested by Home Assistant. This component might "
"cause stability problems, be sure to disable it if you "
"experience issues with Home Assistant"
)
_UNDEF = object() # Internal; not helpers.typing.UNDEFINED due to circular dependency
MAX_LOAD_CONCURRENTLY = 4
MOVED_ZEROCONF_PROPS = ("macaddress", "model", "manufacturer")
class Manifest(TypedDict, total=False):
"""
Integration manifest.
Note that none of the attributes are marked Optional here. However, some of them may be optional in manifest.json
in the sense that they can be omitted altogether. But when present, they should not have null values in it.
"""
name: str
disabled: str
domain: str
dependencies: list[str]
after_dependencies: list[str]
requirements: list[str]
config_flow: bool
documentation: str
issue_tracker: str
quality_scale: str
iot_class: str
mqtt: list[str]
ssdp: list[dict[str, str]]
zeroconf: list[str | dict[str, str]]
dhcp: list[dict[str, str]]
usb: list[dict[str, str]]
homekit: dict[str, list[str]]
is_built_in: bool
version: str
codeowners: list[str]
def manifest_from_legacy_module(domain: str, module: ModuleType) -> Manifest:
"""Generate a manifest from a legacy module."""
return {
"domain": domain,
"name": domain,
"requirements": getattr(module, "REQUIREMENTS", []),
"dependencies": getattr(module, "DEPENDENCIES", []),
"codeowners": [],
}
async def _async_get_custom_components(
hass: HomeAssistant,
) -> dict[str, Integration]:
"""Return list of custom integrations."""
if hass.config.safe_mode:
return {}
try:
import custom_components # pylint: disable=import-outside-toplevel
except ImportError:
return {}
def get_sub_directories(paths: list[str]) -> list[pathlib.Path]:
"""Return all sub directories in a set of paths."""
return [
entry
for path in paths
for entry in pathlib.Path(path).iterdir()
if entry.is_dir()
]
dirs = await hass.async_add_executor_job(
get_sub_directories, custom_components.__path__
)
integrations = await gather_with_concurrency(
MAX_LOAD_CONCURRENTLY,
*(
hass.async_add_executor_job(
Integration.resolve_from_root, hass, custom_components, comp.name
)
for comp in dirs
),
)
return {
integration.domain: integration
for integration in integrations
if integration is not None
}
async def async_get_custom_components(
hass: HomeAssistant,
) -> dict[str, Integration]:
"""Return cached list of custom integrations."""
if (reg_or_evt := hass.data.get(DATA_CUSTOM_COMPONENTS)) is None:
evt = hass.data[DATA_CUSTOM_COMPONENTS] = asyncio.Event()
reg = await _async_get_custom_components(hass)
hass.data[DATA_CUSTOM_COMPONENTS] = reg
evt.set()
return reg
if isinstance(reg_or_evt, asyncio.Event):
await reg_or_evt.wait()
return cast(dict[str, "Integration"], hass.data.get(DATA_CUSTOM_COMPONENTS))
return cast(dict[str, "Integration"], reg_or_evt)
async def async_get_config_flows(hass: HomeAssistant) -> set[str]:
"""Return cached list of config flows."""
# pylint: disable=import-outside-toplevel
from .generated.config_flows import FLOWS
flows: set[str] = set()
flows.update(FLOWS)
integrations = await async_get_custom_components(hass)
flows.update(
[
integration.domain
for integration in integrations.values()
if integration.config_flow
]
)
return flows
def async_process_zeroconf_match_dict(entry: dict[str, Any]) -> dict[str, Any]:
"""Handle backwards compat with zeroconf matchers."""
entry_without_type: dict[str, Any] = entry.copy()
del entry_without_type["type"]
# These properties keys used to be at the top level, we relocate
# them for backwards compat
for moved_prop in MOVED_ZEROCONF_PROPS:
if value := entry_without_type.pop(moved_prop, None):
_LOGGER.warning(
'Matching the zeroconf property "%s" at top-level is deprecated and should be moved into a properties dict; Check the developer documentation',
moved_prop,
)
if "properties" not in entry_without_type:
prop_dict: dict[str, str] = {}
entry_without_type["properties"] = prop_dict
else:
prop_dict = entry_without_type["properties"]
prop_dict[moved_prop] = value.lower()
return entry_without_type
async def async_get_zeroconf(
hass: HomeAssistant,
) -> dict[str, list[dict[str, str | dict[str, str]]]]:
"""Return cached list of zeroconf types."""
zeroconf: dict[str, list[dict[str, str | dict[str, str]]]] = ZEROCONF.copy() # type: ignore[assignment]
integrations = await async_get_custom_components(hass)
for integration in integrations.values():
if not integration.zeroconf:
continue
for entry in integration.zeroconf:
data: dict[str, str | dict[str, str]] = {"domain": integration.domain}
if isinstance(entry, dict):
typ = entry["type"]
data.update(async_process_zeroconf_match_dict(entry))
else:
typ = entry
zeroconf.setdefault(typ, []).append(data)
return zeroconf
async def async_get_dhcp(hass: HomeAssistant) -> list[dict[str, str]]:
"""Return cached list of dhcp types."""
dhcp: list[dict[str, str]] = DHCP.copy()
integrations = await async_get_custom_components(hass)
for integration in integrations.values():
if not integration.dhcp:
continue
for entry in integration.dhcp:
dhcp.append({"domain": integration.domain, **entry})
return dhcp
async def async_get_usb(hass: HomeAssistant) -> list[dict[str, str]]:
"""Return cached list of usb types."""
usb: list[dict[str, str]] = USB.copy()
integrations = await async_get_custom_components(hass)
for integration in integrations.values():
if not integration.usb:
continue
for entry in integration.usb:
usb.append(
{
"domain": integration.domain,
**{k: v for k, v in entry.items() if k != "known_devices"},
}
)
return usb
async def async_get_homekit(hass: HomeAssistant) -> dict[str, str]:
"""Return cached list of homekit models."""
homekit: dict[str, str] = HOMEKIT.copy()
integrations = await async_get_custom_components(hass)
for integration in integrations.values():
if (
not integration.homekit
or "models" not in integration.homekit
or not integration.homekit["models"]
):
continue
for model in integration.homekit["models"]:
homekit[model] = integration.domain
return homekit
async def async_get_ssdp(hass: HomeAssistant) -> dict[str, list[dict[str, str]]]:
"""Return cached list of ssdp mappings."""
ssdp: dict[str, list[dict[str, str]]] = SSDP.copy()
integrations = await async_get_custom_components(hass)
for integration in integrations.values():
if not integration.ssdp:
continue
ssdp[integration.domain] = integration.ssdp
return ssdp
async def async_get_mqtt(hass: HomeAssistant) -> dict[str, list[str]]:
"""Return cached list of MQTT mappings."""
mqtt: dict[str, list[str]] = MQTT.copy()
integrations = await async_get_custom_components(hass)
for integration in integrations.values():
if not integration.mqtt:
continue
mqtt[integration.domain] = integration.mqtt
return mqtt
class Integration:
"""An integration in Home Assistant."""
@classmethod
def resolve_from_root(
cls, hass: HomeAssistant, root_module: ModuleType, domain: str
) -> Integration | None:
"""Resolve an integration from a root module."""
for base in root_module.__path__:
manifest_path = pathlib.Path(base) / domain / "manifest.json"
if not manifest_path.is_file():
continue
try:
manifest = json.loads(manifest_path.read_text())
except ValueError as err:
_LOGGER.error(
"Error parsing manifest.json file at %s: %s", manifest_path, err
)
continue
integration = cls(
hass,
f"{root_module.__name__}.{domain}",
manifest_path.parent,
manifest,
)
if integration.is_built_in:
return integration
_LOGGER.warning(CUSTOM_WARNING, integration.domain)
if integration.version is None:
_LOGGER.error(
"The custom integration '%s' does not have a "
"version key in the manifest file and was blocked from loading. "
"See https://developers.home-assistant.io/blog/2021/01/29/custom-integration-changes#versions for more details",
integration.domain,
)
return None
try:
AwesomeVersion(
integration.version,
[
AwesomeVersionStrategy.CALVER,
AwesomeVersionStrategy.SEMVER,
AwesomeVersionStrategy.SIMPLEVER,
AwesomeVersionStrategy.BUILDVER,
AwesomeVersionStrategy.PEP440,
],
)
except AwesomeVersionException:
_LOGGER.error(
"The custom integration '%s' does not have a "
"valid version key (%s) in the manifest file and was blocked from loading. "
"See https://developers.home-assistant.io/blog/2021/01/29/custom-integration-changes#versions for more details",
integration.domain,
integration.version,
)
return None
return integration
return None
def __init__(
self,
hass: HomeAssistant,
pkg_path: str,
file_path: pathlib.Path,
manifest: Manifest,
) -> None:
"""Initialize an integration."""
self.hass = hass
self.pkg_path = pkg_path
self.file_path = file_path
self.manifest = manifest
manifest["is_built_in"] = self.is_built_in
if self.dependencies:
self._all_dependencies_resolved: bool | None = None
self._all_dependencies: set[str] | None = None
else:
self._all_dependencies_resolved = True
self._all_dependencies = set()
_LOGGER.info("Loaded %s from %s", self.domain, pkg_path)
@property
def name(self) -> str:
"""Return name."""
return self.manifest["name"]
@property
def disabled(self) -> str | None:
"""Return reason integration is disabled."""
return self.manifest.get("disabled")
@property
def domain(self) -> str:
"""Return domain."""
return self.manifest["domain"]
@property
def dependencies(self) -> list[str]:
"""Return dependencies."""
return self.manifest.get("dependencies", [])
@property
def after_dependencies(self) -> list[str]:
"""Return after_dependencies."""
return self.manifest.get("after_dependencies", [])
@property
def requirements(self) -> list[str]:
"""Return requirements."""
return self.manifest.get("requirements", [])
@property
def config_flow(self) -> bool:
"""Return config_flow."""
return self.manifest.get("config_flow") or False
@property
def documentation(self) -> str | None:
"""Return documentation."""
return self.manifest.get("documentation")
@property
def issue_tracker(self) -> str | None:
"""Return issue tracker link."""
return self.manifest.get("issue_tracker")
@property
def quality_scale(self) -> str | None:
"""Return Integration Quality Scale."""
return self.manifest.get("quality_scale")
@property
def iot_class(self) -> str | None:
"""Return the integration IoT Class."""
return self.manifest.get("iot_class")
@property
def mqtt(self) -> list[str] | None:
"""Return Integration MQTT entries."""
return self.manifest.get("mqtt")
@property
def ssdp(self) -> list[dict[str, str]] | None:
"""Return Integration SSDP entries."""
return self.manifest.get("ssdp")
@property
def zeroconf(self) -> list[str | dict[str, str]] | None:
"""Return Integration zeroconf entries."""
return self.manifest.get("zeroconf")
@property
def dhcp(self) -> list[dict[str, str]] | None:
"""Return Integration dhcp entries."""
return self.manifest.get("dhcp")
@property
def usb(self) -> list[dict[str, str]] | None:
"""Return Integration usb entries."""
return self.manifest.get("usb")
@property
def homekit(self) -> dict[str, list[str]] | None:
"""Return Integration homekit entries."""
return self.manifest.get("homekit")
@property
def is_built_in(self) -> bool:
"""Test if package is a built-in integration."""
return self.pkg_path.startswith(PACKAGE_BUILTIN)
@property
def version(self) -> AwesomeVersion | None:
"""Return the version of the integration."""
if "version" not in self.manifest:
return None
return AwesomeVersion(self.manifest["version"])
@property
def all_dependencies(self) -> set[str]:
"""Return all dependencies including sub-dependencies."""
if self._all_dependencies is None:
raise RuntimeError("Dependencies not resolved!")
return self._all_dependencies
@property
def all_dependencies_resolved(self) -> bool:
"""Return if all dependencies have been resolved."""
return self._all_dependencies_resolved is not None
async def resolve_dependencies(self) -> bool:
"""Resolve all dependencies."""
if self._all_dependencies_resolved is not None:
return self._all_dependencies_resolved
try:
dependencies = await _async_component_dependencies(
self.hass, self.domain, self, set(), set()
)
dependencies.discard(self.domain)
self._all_dependencies = dependencies
self._all_dependencies_resolved = True
except IntegrationNotFound as err:
_LOGGER.error(
"Unable to resolve dependencies for %s: we are unable to resolve (sub)dependency %s",
self.domain,
err.domain,
)
self._all_dependencies_resolved = False
except CircularDependency as err:
_LOGGER.error(
"Unable to resolve dependencies for %s: it contains a circular dependency: %s -> %s",
self.domain,
err.from_domain,
err.to_domain,
)
self._all_dependencies_resolved = False
return self._all_dependencies_resolved
def get_component(self) -> ModuleType:
"""Return the component."""
cache: dict[str, ModuleType] = self.hass.data.setdefault(DATA_COMPONENTS, {})
if self.domain in cache:
return cache[self.domain]
try:
cache[self.domain] = importlib.import_module(self.pkg_path)
except ImportError:
raise
except Exception as err:
_LOGGER.exception(
"Unexpected exception importing component %s", self.pkg_path
)
raise ImportError(f"Exception importing {self.pkg_path}") from err
return cache[self.domain]
def get_platform(self, platform_name: str) -> ModuleType:
"""Return a platform for an integration."""
cache: dict[str, ModuleType] = self.hass.data.setdefault(DATA_COMPONENTS, {})
full_name = f"{self.domain}.{platform_name}"
if full_name in cache:
return cache[full_name]
try:
cache[full_name] = self._import_platform(platform_name)
except ImportError:
raise
except Exception as err:
_LOGGER.exception(
"Unexpected exception importing platform %s.%s",
self.pkg_path,
platform_name,
)
raise ImportError(
f"Exception importing {self.pkg_path}.{platform_name}"
) from err
return cache[full_name]
def _import_platform(self, platform_name: str) -> ModuleType:
"""Import the platform."""
return importlib.import_module(f"{self.pkg_path}.{platform_name}")
def __repr__(self) -> str:
"""Text representation of class."""
return f"<Integration {self.domain}: {self.pkg_path}>"
async def async_get_integration(hass: HomeAssistant, domain: str) -> Integration:
"""Get an integration."""
if (cache := hass.data.get(DATA_INTEGRATIONS)) is None:
if not _async_mount_config_dir(hass):
raise IntegrationNotFound(domain)
cache = hass.data[DATA_INTEGRATIONS] = {}
int_or_evt: Integration | asyncio.Event | None = cache.get(domain, _UNDEF)
if isinstance(int_or_evt, asyncio.Event):
await int_or_evt.wait()
# When we have waited and it's _UNDEF, it doesn't exist
# We don't cache that it doesn't exist, or else people can't fix it
# and then restart, because their config will never be valid.
if (int_or_evt := cache.get(domain, _UNDEF)) is _UNDEF:
raise IntegrationNotFound(domain)
if int_or_evt is not _UNDEF:
return cast(Integration, int_or_evt)
event = cache[domain] = asyncio.Event()
try:
integration = await _async_get_integration(hass, domain)
except Exception:
# Remove event from cache.
cache.pop(domain)
event.set()
raise
cache[domain] = integration
event.set()
return integration
async def _async_get_integration(hass: HomeAssistant, domain: str) -> Integration:
if "." in domain:
raise ValueError(f"Invalid domain {domain}")
# Instead of using resolve_from_root we use the cache of custom
# components to find the integration.
if integration := (await async_get_custom_components(hass)).get(domain):
return integration
from . import components # pylint: disable=import-outside-toplevel
if integration := await hass.async_add_executor_job(
Integration.resolve_from_root, hass, components, domain
):
return integration
raise IntegrationNotFound(domain)
class LoaderError(Exception):
"""Loader base error."""
class IntegrationNotFound(LoaderError):
"""Raised when a component is not found."""
def __init__(self, domain: str) -> None:
"""Initialize a component not found error."""
super().__init__(f"Integration '{domain}' not found.")
self.domain = domain
class CircularDependency(LoaderError):
"""Raised when a circular dependency is found when resolving components."""
def __init__(self, from_domain: str, to_domain: str) -> None:
"""Initialize circular dependency error."""
super().__init__(f"Circular dependency detected: {from_domain} -> {to_domain}.")
self.from_domain = from_domain
self.to_domain = to_domain
def _load_file(
hass: HomeAssistant, comp_or_platform: str, base_paths: list[str]
) -> ModuleType | None:
"""Try to load specified file.
Looks in config dir first, then built-in components.
Only returns it if also found to be valid.
Async friendly.
"""
with suppress(KeyError):
return hass.data[DATA_COMPONENTS][comp_or_platform] # type: ignore
if (cache := hass.data.get(DATA_COMPONENTS)) is None:
if not _async_mount_config_dir(hass):
return None
cache = hass.data[DATA_COMPONENTS] = {}
for path in (f"{base}.{comp_or_platform}" for base in base_paths):
try:
module = importlib.import_module(path)
# In Python 3 you can import files from directories that do not
# contain the file __init__.py. A directory is a valid module if
# it contains a file with the .py extension. In this case Python
# will succeed in importing the directory as a module and call it
# a namespace. We do not care about namespaces.
# This prevents that when only
# custom_components/switch/some_platform.py exists,
# the import custom_components.switch would succeed.
# __file__ was unset for namespaces before Python 3.7
if getattr(module, "__file__", None) is None:
continue
cache[comp_or_platform] = module
return module
except ImportError as err:
# This error happens if for example custom_components/switch
# exists and we try to load switch.demo.
# Ignore errors for custom_components, custom_components.switch
# and custom_components.switch.demo.
white_listed_errors = []
parts = []
for part in path.split("."):
parts.append(part)
white_listed_errors.append(f"No module named '{'.'.join(parts)}'")
if str(err) not in white_listed_errors:
_LOGGER.exception(
("Error loading %s. Make sure all dependencies are installed"), path
)
return None
class ModuleWrapper:
"""Class to wrap a Python module and auto fill in hass argument."""
def __init__(self, hass: HomeAssistant, module: ModuleType) -> None:
"""Initialize the module wrapper."""
self._hass = hass
self._module = module
def __getattr__(self, attr: str) -> Any:
"""Fetch an attribute."""
value = getattr(self._module, attr)
if hasattr(value, "__bind_hass"):
value = ft.partial(value, self._hass)
setattr(self, attr, value)
return value
class Components:
"""Helper to load components."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the Components class."""
self._hass = hass
def __getattr__(self, comp_name: str) -> ModuleWrapper:
"""Fetch a component."""
# Test integration cache
integration = self._hass.data.get(DATA_INTEGRATIONS, {}).get(comp_name)
if isinstance(integration, Integration):
component: ModuleType | None = integration.get_component()
else:
# Fallback to importing old-school
component = _load_file(self._hass, comp_name, _lookup_path(self._hass))
if component is None:
raise ImportError(f"Unable to load {comp_name}")
wrapped = ModuleWrapper(self._hass, component)
setattr(self, comp_name, wrapped)
return wrapped
class Helpers:
"""Helper to load helpers."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the Helpers class."""
self._hass = hass
def __getattr__(self, helper_name: str) -> ModuleWrapper:
"""Fetch a helper."""
helper = importlib.import_module(f"homeassistant.helpers.{helper_name}")
wrapped = ModuleWrapper(self._hass, helper)
setattr(self, helper_name, wrapped)
return wrapped
def bind_hass(func: CALLABLE_T) -> CALLABLE_T:
"""Decorate function to indicate that first argument is hass."""
setattr(func, "__bind_hass", True)
return func
async def _async_component_dependencies(
hass: HomeAssistant,
start_domain: str,
integration: Integration,
loaded: set[str],
loading: set[str],
) -> set[str]:
"""Recursive function to get component dependencies.
Async friendly.
"""
domain = integration.domain
loading.add(domain)
for dependency_domain in integration.dependencies:
# Check not already loaded
if dependency_domain in loaded:
continue
# If we are already loading it, we have a circular dependency.
if dependency_domain in loading:
raise CircularDependency(domain, dependency_domain)
loaded.add(dependency_domain)
dep_integration = await async_get_integration(hass, dependency_domain)
if start_domain in dep_integration.after_dependencies:
raise CircularDependency(start_domain, dependency_domain)
if dep_integration.dependencies:
dep_loaded = await _async_component_dependencies(
hass, start_domain, dep_integration, loaded, loading
)
loaded.update(dep_loaded)
loaded.add(domain)
loading.remove(domain)
return loaded
def _async_mount_config_dir(hass: HomeAssistant) -> bool:
"""Mount config dir in order to load custom_component.
Async friendly but not a coroutine.
"""
if hass.config.config_dir is None:
_LOGGER.error("Can't load integrations - configuration directory is not set")
return False
if hass.config.config_dir not in sys.path:
sys.path.insert(0, hass.config.config_dir)
return True
def _lookup_path(hass: HomeAssistant) -> list[str]:
"""Return the lookup paths for legacy lookups."""
if hass.config.safe_mode:
return [PACKAGE_BUILTIN]
return [PACKAGE_CUSTOM_COMPONENTS, PACKAGE_BUILTIN]
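# Illustrative sketch (not part of Home Assistant): typical use of the loader from async
# code that already has a running `hass` instance; "hue" is only an example domain.
async def _example_lookup(hass: HomeAssistant) -> None:
    integration = await async_get_integration(hass, "hue")
    print(integration.name, integration.version, integration.requirements)
    component = integration.get_component()  # imports homeassistant.components.hue
    print(component.DOMAIN)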
the-stack_106_25857
#!./parrott-env/bin/python
from app import app, collector
from apscheduler.scheduler import Scheduler
if __name__ == '__main__':
# Run the collector on start
collector.collect()
# Schedule the Collector
scheduler = Scheduler()
scheduler.add_interval_job(collector.collect, minutes=30)
scheduler.start()
# Awaken the Parrott
app.run(debug=True)
the-stack_106_25858
import requests
from bs4 import BeautifulSoup
from .fetcher import Fetcher
from ..excepts import MangaNotFound
class Naver(Fetcher):
def __init__(self, link:str=None, manga:str=None, chapstart=1, collection=""):
super().__init__(link, manga, chapstart)
if collection:
self._collection = collection
self.domain = ".comic.naver.com"
if link is not None:
self._manga_id = link.partition("titleId=")[-1]
elif str(manga).isdigit():
self._manga_id = manga
else:
raise MangaNotFound(manga)
self._set_current_chap_info(self.chapter_number)
def _set_current_chap_info(self, chap_id):
self.npage = 1
self.chapter_number = int(chap_id)
self._chap_url = f"https://comic.naver.com/{self._collection}/detail.nhn?titleId={self._manga_id}&no={self.chapter_number}"
self._chap_req = requests.get(self._chap_url)
if self._chap_req.url == "https://comic.naver.com/main.nhn":
raise MangaNotFound(self._manga_id)
self._bs4 = BeautifulSoup(self._chap_req.text, "html.parser")
if self._chap_req.url == f"https://comic.naver.com/{self._collection}/list.nhn?titleId={self._manga_id}" or int(self._chap_req.url.partition("&no=")[-1]) != int(self.chapter_number):
raise MangaNotFound(f"{self._manga_id}, chapter {self.chapter_number}")
self.author = self._bs4.find("div", class_="comicinfo").findChild("span", class_="wrt_nm").text.strip()
self.chapter_name = self._bs4.find("div", class_="tit_area").findChild("h3").text
self.manga_name = self._bs4.find("div", class_="comicinfo").findChild("div", class_="detail").findChild("h2").contents[0]
self._img_list = self._bs4.find(class_="wt_viewer").findChildren("img")
self.image = self._img_list[0].get("src")
        self.ext = self.image.split(".")[-1]
def next_image(self):
self.image = self._img_list[self.npage].get("src")
        self.ext = self.image.split(".")[-1]
self.npage += 1
def go_to_chapter(self, chap):
self._set_current_chap_info(chap)
def next_chapter(self):
self._set_current_chap_info(self.chapter_number + 1)
def is_last_image(self):
return self.npage == len(self._img_list)
def is_last_chapter(self):
return self.chapter_number == int(self._bs4.find("div", class_="pg_area").findChild("span", class_="total").text)
@classmethod
def scan(cls, link:str=None, manga:str=None):
if link is not None:
manga_id = link.partition("titleId=")[-1]
elif str(manga).isdigit():
manga_id = manga
else:
raise MangaNotFound(manga)
req = requests.get(f"https://comic.naver.com/webtoon/detail.nhn?titleId={manga_id}")
if req.url == "https://comic.naver.com/main.nhn":
raise MangaNotFound(manga_id)
last_chap = int(req.url.split("&no=")[-1])
return list(range(1, last_chap + 1))
class NaverWebtoon(Naver):
_collection = "webtoon" # set at class level for the scan method
def __init__(self, link:str=None, manga:str=None, chapstart=1):
super().__init__(link, manga, chapstart)
class NaverBestChallenge(Naver):
_collection = "bestChallenge" # set at class level for the scan method
def __init__(self, link:str=None, manga:str=None, chapstart=1):
super().__init__(link, manga, chapstart)
class NaverChallenge(Naver):
_collection = "challenge" # set at class level for the scan method
def __init__(self, link:str=None, manga:str=None, chapstart=1):
super().__init__(link, manga, chapstart)
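# Illustrative usage sketch (not part of the original module): the titleId below is a
# placeholder, not a real webtoon id; substitute a valid one before running.
if __name__ == "__main__":
    fetcher = NaverWebtoon(link="https://comic.naver.com/webtoon/detail.nhn?titleId=123456",
                           chapstart=1)
    print(fetcher.manga_name, "-", fetcher.chapter_name)
    while True:
        print(fetcher.image)  # URL of the current page image
        if fetcher.is_last_image():
            break
        fetcher.next_image()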