code (stringlengths 22–1.05M) | apis (listlengths 1–3.31k) | extract_api (stringlengths 75–3.25M)
---|---|---
from preprocessing.vectorizers import Doc2VecVectorizer
from nnframework.data_builder import DataBuilder
import pandas as pd
import constants as const
import numpy as np
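# Vectorize the "Message" column of the source CSV with Doc2Vec and return the vectors with the original index column prepended.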
def generate_d2v_vectors(source_file):
df = pd.read_csv(source_file)
messages = df["Message"].values
vectorizer = Doc2VecVectorizer()
vectors = vectorizer.vectorize(messages)
return np.c_[df.iloc[:,0].values, vectors]
if __name__ == '__main__':
# Generate vectors (with index)
output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED)
# Save vectors as npy file
np.save(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output)
|
[
"pandas.read_csv",
"numpy.save",
"preprocessing.vectorizers.Doc2VecVectorizer"
] |
[((219, 243), 'pandas.read_csv', 'pd.read_csv', (['source_file'], {}), '(source_file)\n', (230, 243), True, 'import pandas as pd\n'), ((298, 317), 'preprocessing.vectorizers.Doc2VecVectorizer', 'Doc2VecVectorizer', ([], {}), '()\n', (315, 317), False, 'from preprocessing.vectorizers import Doc2VecVectorizer\n'), ((579, 632), 'numpy.save', 'np.save', (['const.FILE_DOC2VEC_INPUTS_UNLABELLED', 'output'], {}), '(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output)\n', (586, 632), True, 'import numpy as np\n')]
|
"""Microphone module."""
import alsaaudio
# pylint: disable=R0903, E1101
class Micro():
"""Class to use micro in a `with` bloc."""
def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE,
alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK):
"""Open the device in nonblocking capture mode.
        The last argument could just as well have been zero for blocking mode.
        Then the sleep call at the bottom of the read loop could have been left out.
"""
self.capture = alsaaudio_capture
self.nonblock = alsaaudio_nonblock
self.inp = None
def __enter__(self):
"""Set the acquisition and return it."""
self.inp = alsaaudio.PCM(self.capture, self.nonblock)
return self.inp
    def __exit__(self, exc_type, exc_value, traceback):
"""Close the acquisition."""
self.inp.close()
|
[
"alsaaudio.PCM"
] |
[((690, 732), 'alsaaudio.PCM', 'alsaaudio.PCM', (['self.capture', 'self.nonblock'], {}), '(self.capture, self.nonblock)\n', (703, 732), False, 'import alsaaudio\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="M_qo7DmLJKLP"
# # Class-Conditional Bernoulli Mixture Model for EMNIST
# + [markdown] id="TU1pCzcIJHTm"
# ## Setup
#
# + id="400WanLyGA2C"
# !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
# %cd -q /pyprobml/scripts
# + id="k1rLl6dHH7Wh"
# !pip install -q superimport
# !pip install -q distrax
# + id="cLpBn5KQeB46"
from conditional_bernoulli_mix_lib import ClassConditionalBMM
from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class
from noisy_spelling_hmm import Word
from jax import vmap
import jax.numpy as jnp
import jax
from jax.random import PRNGKey, split
import numpy as np
from matplotlib import pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="ey9k06RweuKc" outputId="38131e5a-82fb-49db-c4d3-f4364a643152"
select_n = 25
dataset, targets = get_emnist_images_per_class(select_n)
dataset, targets = jnp.array(dataset), jnp.array(targets)
# + [markdown] id="KwNq7HYYLPO9"
# ## Initialization of Class Conditional BMMs
# + colab={"base_uri": "https://localhost:8080/"} id="UABtUDPjffFt" outputId="d873a708-542c-44e6-8c72-2c5908c7bbad"
n_mix = 30
n_char = 52
mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix))
p_min, p_max = 0.4, 0.6
n_pixels = 28 * 28
probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels)))
class_priors = jnp.array(np.full((n_char,), 1./n_char))
cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
# + [markdown] id="Qa95Fua5Kc3i"
# ## Full Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="PDzuEjs9Kewi" outputId="c81916c0-c6b7-45bd-d308-eab878afe281"
num_epochs, batch_size = 100, len(dataset)
losses = cbm_gd.fit_sgd(dataset.reshape((-1, n_pixels)), targets, batch_size, num_epochs = num_epochs)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="37mNMNrpInfh"
# ## EM Algorithm
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="FJeBzIKYfsUk" outputId="9d8db485-a251-4b1a-a6e5-93833c83dce6"
losses = cbm_em.fit_em(dataset, targets, 8)
plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
# + [markdown] id="NjCQpoH1Iuuf"
# ## Plot of the Probabilities of Components Distribution
# + id="KkyAHDW4JgyM"
def plot_components_dist(cbm, n_mix):
fig = plt.figure(figsize=(45, 20))
for k in range(n_mix):
for cls in range(cbm.num_of_classes):
plt.subplot(n_mix ,cbm.num_of_classes, cbm.num_of_classes*k + cls +1)
plt.imshow(1 - cbm.model.components_distribution.distribution.probs[cls][k,:].reshape((28,28)), cmap = "gray")
plt.axis('off')
plt.tight_layout()
plt.show()
# + [markdown] id="J8KLkCWpNAeF"
# ### GD
# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="DSOiuNeAM8gl" outputId="dce9416a-b646-423d-b4bf-c78728db1cab"
plot_components_dist(cbm_gd, n_mix)
# + [markdown] id="FO31plUVNDSO"
# ### EM
# + id="ZM43qs6FfvlP" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="81a095f1-1099-4809-90a8-272dbed11662"
plot_components_dist(cbm_em, n_mix)
# + [markdown] id="IqRdcklzOeAY"
# ## Sampling
# + id="wgI6sFWKN4ax"
p1, p2, p3 = 0.4, 0.1, 2e-3
n_misspelled = 1 # number of misspelled words created for each class
vocab = ['book', 'bird', 'bond', 'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band']
rng_key = PRNGKey(0)
keys = [dev_array for dev_array in split(rng_key, len(vocab))]
# + id="x3GpZ8jbf11N" colab={"base_uri": "https://localhost:8080/"} outputId="5a348b69-bdf4-4f80-f059-1062ba2fbb88"
hmms = {word: Word(word, p1, p2, p3, n_char, "all", mixing_coeffs=cbm_em.model.mixture_distribution.probs,
initial_probs=cbm_em.model.components_distribution.distribution.probs, n_mix=n_mix) for word in vocab}
samples = jax.tree_multimap(lambda word, key: hmms[word].n_sample(n_misspelled, key), vocab, keys)
# + id="7VXVsobcg_KO" colab={"base_uri": "https://localhost:8080/"} outputId="3e915a79-7f5c-4131-d6ee-97f11c83d86f"
decoded_words = vmap(decode, in_axes = (0, None, None))(jnp.array(samples)[:, :, :, -1].reshape((n_misspelled * len(vocab), -1)), n_char + 1, "all")
get_decoded_samples(decoded_words)
# + [markdown] id="xrRy8MG0afR8"
# ### Figure
# + id="O0-HaN5rQAvP"
def plot_samples(samples):
samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28))
fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10))
fig.subplots_adjust(hspace = .2, wspace=.001)
for i, ax in enumerate(axes.flatten()):
ax.imshow(samples[i], cmap="gray")
ax.set_axis_off()
fig.tight_layout()
plt.show()
# + id="EbZn9vrfhei4" colab={"base_uri": "https://localhost:8080/", "height": 728} outputId="114217bf-cadb-4331-82ef-b4844c038342"
plot_samples(samples)
# + [markdown] id="eNDmwV7EPyrR"
# ## Calculation of Log Likelihoods for Test Data
# + id="525MUl5HPe1K"
# noisy words
test_words = ['bo--', '-On-', 'b-N-', 'B---', '-OnD', 'b--D', '---D', '--Nd', 'B-nD', '-O--', 'b--d', '--n-']
test_images = fake_test_data(test_words, dataset, targets, n_char + 1, "all")
# + id="1dFCdVNgPYtJ"
def plot_log_likelihood(hmms, test_words, test_images, vocab):
fig, axes = plt.subplots(4, 3, figsize=(20, 10))
for i, (ax, img, word) in enumerate(zip(axes.flat, test_images, test_words)):
flattened_img = img.reshape((len(img), -1))
loglikelihoods = jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab)
loglikelihoods = jnp.array(loglikelihoods)
ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color="black")
ax.set_title(f'{word}')
plt.tight_layout()
plt.show()
# + id="qv-Df8GEhfC4" colab={"base_uri": "https://localhost:8080/", "height": 784} outputId="9be6abf3-0ecc-4ef5-e301-380c5eac38ff"
plot_log_likelihood(hmms, test_words, test_images, vocab)
|
[
"jax.nn.log_softmax",
"conditional_bernoulli_mix_utils.fake_test_data",
"jax.random.PRNGKey",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.full",
"conditional_bernoulli_mix_utils.get_decoded_samples",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"jax.vmap",
"matplotlib.pyplot.ylabel",
"noisy_spelling_hmm.Word",
"conditional_bernoulli_mix_utils.get_emnist_images_per_class",
"jax.numpy.array",
"numpy.random.uniform",
"conditional_bernoulli_mix_lib.ClassConditionalBMM",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((1137, 1174), 'conditional_bernoulli_mix_utils.get_emnist_images_per_class', 'get_emnist_images_per_class', (['select_n'], {}), '(select_n)\n', (1164, 1174), False, 'from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class\n'), ((1705, 1813), 'conditional_bernoulli_mix_lib.ClassConditionalBMM', 'ClassConditionalBMM', ([], {'mixing_coeffs': 'mixing_coeffs', 'probs': 'probs', 'class_priors': 'class_priors', 'n_char': 'n_char'}), '(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=\n class_priors, n_char=n_char)\n', (1724, 1813), False, 'from conditional_bernoulli_mix_lib import ClassConditionalBMM\n'), ((1818, 1926), 'conditional_bernoulli_mix_lib.ClassConditionalBMM', 'ClassConditionalBMM', ([], {'mixing_coeffs': 'mixing_coeffs', 'probs': 'probs', 'class_priors': 'class_priors', 'n_char': 'n_char'}), '(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=\n class_priors, n_char=n_char)\n', (1837, 1926), False, 'from conditional_bernoulli_mix_lib import ClassConditionalBMM\n'), ((2270, 2310), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'color': '"""k"""', 'linewidth': '(3)'}), "(losses, color='k', linewidth=3)\n", (2278, 2310), True, 'from matplotlib import pyplot as plt\n'), ((2311, 2334), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2321, 2334), True, 'from matplotlib import pyplot as plt\n'), ((2335, 2372), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Negative Log Likelihood"""'], {}), "('Negative Log Likelihood')\n", (2345, 2372), True, 'from matplotlib import pyplot as plt\n'), ((2373, 2383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2381, 2383), True, 'from matplotlib import pyplot as plt\n'), ((2613, 2653), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'color': '"""k"""', 'linewidth': '(3)'}), "(losses, color='k', linewidth=3)\n", (2621, 2653), True, 'from matplotlib import pyplot as plt\n'), ((2654, 2677), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2664, 2677), True, 'from matplotlib import pyplot as plt\n'), ((2678, 2715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Negative Log Likelihood"""'], {}), "('Negative Log Likelihood')\n", (2688, 2715), True, 'from matplotlib import pyplot as plt\n'), ((2716, 2726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2724, 2726), True, 'from matplotlib import pyplot as plt\n'), ((3944, 3954), 'jax.random.PRNGKey', 'PRNGKey', (['(0)'], {}), '(0)\n', (3951, 3954), False, 'from jax.random import PRNGKey, split\n'), ((4733, 4767), 'conditional_bernoulli_mix_utils.get_decoded_samples', 'get_decoded_samples', (['decoded_words'], {}), '(decoded_words)\n', (4752, 4767), False, 'from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class\n'), ((5598, 5661), 'conditional_bernoulli_mix_utils.fake_test_data', 'fake_test_data', (['test_words', 'dataset', 'targets', '(n_char + 1)', '"""all"""'], {}), "(test_words, dataset, targets, n_char + 1, 'all')\n", (5612, 5661), False, 'from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class\n'), ((1194, 1212), 'jax.numpy.array', 'jnp.array', (['dataset'], {}), '(dataset)\n', (1203, 1212), True, 'import jax.numpy as jnp\n'), ((1214, 1232), 'jax.numpy.array', 'jnp.array', (['targets'], {}), '(targets)\n', (1223, 1232), True, 'import jax.numpy as jnp\n'), ((1480, 1517), 
'numpy.full', 'np.full', (['(n_char, n_mix)', '(1.0 / n_mix)'], {}), '((n_char, n_mix), 1.0 / n_mix)\n', (1487, 1517), True, 'import numpy as np\n'), ((1578, 1636), 'numpy.random.uniform', 'np.random.uniform', (['p_min', 'p_max', '(n_char, n_mix, n_pixels)'], {}), '(p_min, p_max, (n_char, n_mix, n_pixels))\n', (1595, 1636), True, 'import numpy as np\n'), ((1664, 1696), 'numpy.full', 'np.full', (['(n_char,)', '(1.0 / n_char)'], {}), '((n_char,), 1.0 / n_char)\n', (1671, 1696), True, 'import numpy as np\n'), ((2889, 2917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(45, 20)'}), '(figsize=(45, 20))\n', (2899, 2917), True, 'from matplotlib import pyplot as plt\n'), ((3218, 3236), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3234, 3236), True, 'from matplotlib import pyplot as plt\n'), ((3239, 3249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3247, 3249), True, 'from matplotlib import pyplot as plt\n'), ((4149, 4335), 'noisy_spelling_hmm.Word', 'Word', (['word', 'p1', 'p2', 'p3', 'n_char', '"""all"""'], {'mixing_coeffs': 'cbm_em.model.mixture_distribution.probs', 'initial_probs': 'cbm_em.model.components_distribution.distribution.probs', 'n_mix': 'n_mix'}), "(word, p1, p2, p3, n_char, 'all', mixing_coeffs=cbm_em.model.\n mixture_distribution.probs, initial_probs=cbm_em.model.\n components_distribution.distribution.probs, n_mix=n_mix)\n", (4153, 4335), False, 'from noisy_spelling_hmm import Word\n'), ((4600, 4637), 'jax.vmap', 'vmap', (['decode'], {'in_axes': '(0, None, None)'}), '(decode, in_axes=(0, None, None))\n', (4604, 4637), False, 'from jax import vmap\n'), ((4950, 4998), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)', 'nrows': '(10)', 'figsize': '(4, 10)'}), '(ncols=4, nrows=10, figsize=(4, 10))\n', (4962, 4998), True, 'from matplotlib import pyplot as plt\n'), ((5187, 5197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5195, 5197), True, 'from matplotlib import pyplot as plt\n'), ((5765, 5801), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(3)'], {'figsize': '(20, 10)'}), '(4, 3, figsize=(20, 10))\n', (5777, 5801), True, 'from matplotlib import pyplot as plt\n'), ((6223, 6241), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6239, 6241), True, 'from matplotlib import pyplot as plt\n'), ((6246, 6256), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6254, 6256), True, 'from matplotlib import pyplot as plt\n'), ((6071, 6096), 'jax.numpy.array', 'jnp.array', (['loglikelihoods'], {}), '(loglikelihoods)\n', (6080, 6096), True, 'import jax.numpy as jnp\n'), ((2997, 3069), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_mix', 'cbm.num_of_classes', '(cbm.num_of_classes * k + cls + 1)'], {}), '(n_mix, cbm.num_of_classes, cbm.num_of_classes * k + cls + 1)\n', (3008, 3069), True, 'from matplotlib import pyplot as plt\n'), ((3199, 3214), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3207, 3214), True, 'from matplotlib import pyplot as plt\n'), ((4640, 4658), 'jax.numpy.array', 'jnp.array', (['samples'], {}), '(samples)\n', (4649, 4658), True, 'import jax.numpy as jnp\n'), ((4880, 4897), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (4888, 4897), True, 'import numpy as np\n'), ((6134, 6168), 'jax.nn.log_softmax', 'jax.nn.log_softmax', (['loglikelihoods'], {}), '(loglikelihoods)\n', (6152, 6168), False, 'import jax\n')]
|
from __future__ import unicode_literals
import unittest
from ddf import DDFManager, DDF_HOME
class BaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dm_spark = DDFManager('spark')
cls.airlines = cls.loadAirlines(cls.dm_spark)
cls.mtcars = cls.loadMtCars(cls.dm_spark)
@classmethod
def tearDownClass(cls):
cls.dm_spark.shutdown()
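    # Create the airlines test table in Hive if it does not exist, load the sample CSV into it, and return it as a DDF.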
@classmethod
def loadAirlines(cls, dm):
table_name = 'airlines_na_pyddf_unittest'
if table_name not in [x.split('\t')[0] for x in dm.sql('show tables')]:
dm.sql('set hive.metastore.warehouse.dir=/tmp', False)
dm.sql('drop table if exists {}'.format(table_name), False)
dm.sql("""create table {} (Year int,Month int,DayofMonth int,
DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int,
CRSArrTime int,UniqueCarrier string, FlightNum int,
TailNum string, ActualElapsedTime int, CRSElapsedTime int,
AirTime int, ArrDelay int, DepDelay int, Origin string,
Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int,
CancellationCode string, Diverted string, CarrierDelay int,
WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
""".format(table_name), False)
dm.sql("load data local inpath '{}/resources/test/airlineWithNA.csv' "
"into table {}".format(DDF_HOME, table_name), False)
return dm.sql2ddf('select * from {}'.format(table_name), False)
@classmethod
def loadMtCars(cls, dm):
table_name = 'mtcars_pyddf_unittest'
if table_name not in [x.split('\t')[0] for x in dm.sql('show tables')]:
dm.sql('set shark.test.data.path=resources', False)
# session.sql('set hive.metastore.warehouse.dir=/tmp')
dm.sql('drop table if exists {}'.format(table_name), False)
dm.sql("CREATE TABLE {} (mpg double, cyl int, disp double, "
"hp int, drat double, wt double, "
"qesc double, vs int, am int, gear int, carb int)"
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '".format(table_name), False)
dm.sql("LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' "
"INTO TABLE {}".format(DDF_HOME, table_name), False)
return dm.sql2ddf('select * from {}'.format(table_name), False)
|
[
"ddf.DDFManager"
] |
[((196, 215), 'ddf.DDFManager', 'DDFManager', (['"""spark"""'], {}), "('spark')\n", (206, 215), False, 'from ddf import DDFManager, DDF_HOME\n')]
|
import difflib
import pathlib
import argparse
from .utils import fail_with_message, progress_with_message, success_with_message
try:
import PyPDF2
except ImportError:
fail_with_message(
'Please install required dependencies before using this package.\n\t> pip3 install -r requirements.txt --user')
def parse_file(path: str):
if not pathlib.Path(path).exists():
raise argparse.ArgumentTypeError('invalid file path')
return path
def parse_ratio(x):
try:
x = float(x)
except ValueError:
raise argparse.ArgumentTypeError(
"%r not a floating-point literal" % (x,))
if x < 0.0 or x > 1.0:
raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]" % (x,))
return x
def diff(content1: str, content2: str):
return difflib.SequenceMatcher(None, content1, content2)
def has_deleted_item(diff):
for operation, *_ in diff.get_opcodes():
if operation == 'delete' or operation == 'replace':
return True
return False
def get_title(content):
return content.split('\n')[0]
def get_content(content):
return content.replace(get_title(content), '').strip()
def has_content(content):
return len(get_content(content)) != 0
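# Walk the PDF page by page and keep a page only when the following page's title or content has changed enough, dropping consecutive near-duplicate pages.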
def sanitize(pdf_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float):
    prev_page = pdf_input.getPage(0)
    nb_pages = pdf_input.getNumPages()
for i in range(1, nb_pages):
progress_with_message('Sanitizing pdf ...', i / nb_pages)
        current_page = pdf_input.getPage(i)
current_content = current_page.extractText()
prev_content = prev_page.extractText()
diff_title = diff(get_title(prev_content), get_title(current_content))
diff_content = diff(get_content(prev_content),
get_content(current_content))
title_has_changed = diff_title.ratio() < title_ratio
content_has_changed = (diff_content.ratio() < content_ratio
and (has_deleted_item(diff_content) or len(prev_content) > len(current_content)))
if has_content(prev_content) and (title_has_changed or content_has_changed):
pdf_output.addPage(prev_page)
prev_page = current_page
pdf_output.addPage(prev_page)
parser = argparse.ArgumentParser(
description="Quickly remove useless page from a huge pdf to get a readable pdf")
parser.add_argument('input_file', type=parse_file,
help='pdf file to be sanitized')
parser.add_argument('output_file', type=str,
help='output sanitized pdf file name')
parser.add_argument('--title-ratio', type=parse_ratio,
                    help='float between 0 and 1 used to detect similar pages from their title. The higher the ratio, the more sensitive the sanitizer will be to any changes. (default: 0.5)', default=.5, dest='title_ratio')
parser.add_argument('--content-ratio', type=parse_ratio,
                    help='float between 0 and 1 used to detect similar pages from their content. The higher the ratio, the more sensitive the sanitizer will be to any changes. (default: 0.8)',
default=.8, dest='content_ratio')
def main():
args = parser.parse_args()
pdf_input = PyPDF2.PdfFileReader(args.input_file)
pdf_output = PyPDF2.PdfFileWriter()
sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio)
with open(args.output_file, 'wb') as f:
pdf_output.write(f)
success_with_message(f'Your file has been sanitized at {args.output_file}')
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"difflib.SequenceMatcher",
"PyPDF2.PdfFileReader",
"pathlib.Path",
"PyPDF2.PdfFileWriter",
"argparse.ArgumentTypeError"
] |
[((2334, 2443), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Quickly remove useless page from a huge pdf to get a readable pdf"""'}), "(description=\n 'Quickly remove useless page from a huge pdf to get a readable pdf')\n", (2357, 2443), False, 'import argparse\n'), ((805, 854), 'difflib.SequenceMatcher', 'difflib.SequenceMatcher', (['None', 'content1', 'content2'], {}), '(None, content1, content2)\n', (828, 854), False, 'import difflib\n'), ((3319, 3356), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['args.input_file'], {}), '(args.input_file)\n', (3339, 3356), False, 'import PyPDF2\n'), ((3374, 3396), 'PyPDF2.PdfFileWriter', 'PyPDF2.PdfFileWriter', ([], {}), '()\n', (3394, 3396), False, 'import PyPDF2\n'), ((399, 446), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""invalid file path"""'], {}), "('invalid file path')\n", (425, 446), False, 'import argparse\n'), ((675, 738), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%r not in range [0.0, 1.0]' % (x,))"], {}), "('%r not in range [0.0, 1.0]' % (x,))\n", (701, 738), False, 'import argparse\n'), ((552, 620), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%r not a floating-point literal' % (x,))"], {}), "('%r not a floating-point literal' % (x,))\n", (578, 620), False, 'import argparse\n'), ((356, 374), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (368, 374), False, 'import pathlib\n')]
|
__author__ = 'palmer'
# every method in smoothing should accept (im,**args)
def median(im, **kwargs):
from scipy import ndimage
im = ndimage.filters.median_filter(im,**kwargs)
return im
def hot_spot_removal(xic, q=99.):
import numpy as np
xic_q = np.percentile(xic, q)
xic[xic > xic_q] = xic_q
return xic
|
[
"numpy.percentile",
"scipy.ndimage.filters.median_filter"
] |
[((141, 184), 'scipy.ndimage.filters.median_filter', 'ndimage.filters.median_filter', (['im'], {}), '(im, **kwargs)\n', (170, 184), False, 'from scipy import ndimage\n'), ((268, 289), 'numpy.percentile', 'np.percentile', (['xic', 'q'], {}), '(xic, q)\n', (281, 289), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from aldryn_apphooks_config.utils import get_app_instance
def apphooks_config(request):
namespace, config = get_app_instance(request)
return {
'namespace': namespace,
'config': config,
}
|
[
"aldryn_apphooks_config.utils.get_app_instance"
] |
[((179, 204), 'aldryn_apphooks_config.utils.get_app_instance', 'get_app_instance', (['request'], {}), '(request)\n', (195, 204), False, 'from aldryn_apphooks_config.utils import get_app_instance\n')]
|
# coding: utf-8
# MultiPerceptron
# Training driven by a FIFO queue
# Records the number of training steps
# Training data comes from a generator instead of a CSV file
# Changed to a 3x11x4 NN model
# Added a score output
import os
_FILE_DIR=os.path.abspath(os.path.dirname(__file__))
import time
import tensorflow as tf
import threading
from sklearn.utils import shuffle
import sys
sys.path.append(_FILE_DIR+'/..')
from generator import SensorGenerator
import numpy as np
tf.reset_default_graph()
MODEL_DIR=_FILE_DIR+"/model"
SUMMARY_LOG_DIR=_FILE_DIR+"/log"
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
n_nodes_hl1 = 11
data_cols = 3 # number of sensors: left45, front, right45
n_classes = 4 # number of prediction classes: stop, left, forward, right
batch_size = 100 # batch size should be roughly 10-100
chunk_size = 100 # capacity of the FIFOQueue
target_step = 10000000 # number of training steps
TEST_NUM = 10000 # number of test samples
generator = SensorGenerator()
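# Build a random batch of sensor readings and the corresponding driving-instruction labels from the rule-based generator.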
def generate_random_train_data(batch_size):
CSVDATA=[]
    # Train decisions for ranges within 10 m
    #sensors = np.random.randint(0,1000,[batch_size,3])
    # Train decisions for within 20 cm ahead
    #LEFT45 = np.random.randint(0,1000,batch_size)
    #FRONT = np.random.randint(0,20,batch_size)
    #RIGHT45 = np.random.randint(0,1000,batch_size)
    # Train decisions for 20 cm-100 cm ahead and within 100 cm to the left/right
    #LEFT45 = np.random.randint(0,100,batch_size)
    #FRONT = np.random.randint(20,200,batch_size)
    #RIGHT45 = np.random.randint(0,100,batch_size)
    # Train decisions for ranges within 2 m
    #LEFT45 = np.random.randint(0,200,batch_size)
    #FRONT = np.random.randint(0,200,batch_size)
    #RIGHT45 = np.random.randint(0,200,batch_size)
    # Train decisions for ranges within 1 m
    #LEFT45 = np.random.randint(0,100,batch_size)
    #FRONT = np.random.randint(0,100,batch_size)
    #RIGHT45 = np.random.randint(0,100,batch_size)
    # Train decisions for ranges within 2 m
sensors = np.random.randint(0,200,[batch_size,3])
#sensors = np.c_[LEFT45,FRONT,RIGHT45]
for i in range(batch_size):
GENERATOR_RESULT = generator.driving_instruction(sensors[i])
CSVROW = np.hstack((sensors[i],GENERATOR_RESULT[0:4]))
CSVDATA.append(CSVROW)
CSVDATA = np.array(CSVDATA)
batch_data = CSVDATA[0:batch_size,0:data_cols]
batch_target = CSVDATA[0:batch_size,data_cols:]
return batch_data, batch_target
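# Producer loop run in a background thread: keeps generating random batches and enqueuing them until the queue is closed.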
def load_and_enqueue(sess):
while True:
try:
batch_data, batch_target = generate_random_train_data(batch_size)
sess.run(enqueue_op, feed_dict={placeholder_input_data:batch_data, placeholder_input_target:batch_target})
except tf.errors.CancelledError as e:
break
print("finished enqueueing")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
with tf.variable_scope("input"):
placeholder_input_data = tf.placeholder('float', [None, data_cols], name='input_data') # for load_and_enqueue. use dequeue_data_op for prediction
placeholder_input_target = tf.placeholder('float', name='input_target') # for load_and_enqueue. use dequeue_target_op for prediction
placeholder_batch_size = tf.placeholder(tf.int32, name='batch_size') # need feed_dict in training sess.run(). don't need for prediction.
with tf.variable_scope("step"):
    placeholder_step = tf.placeholder(tf.int32, name='input_step') # for feeding in the step value
    variable_step = tf.Variable(initial_value=0, name="step") # records the training step
step_op = variable_step.assign(placeholder_step)
with tf.variable_scope("queue"):
queue = tf.FIFOQueue(
capacity=chunk_size, # enqueue size
dtypes=['float', 'float'],
shapes=[[data_cols], [n_classes]],
name='FIFOQueue'
)
# Enqueue and dequeue operations
enqueue_op = queue.enqueue_many([placeholder_input_data, placeholder_input_target], name='enqueue_op')
dequeue_data_op, dequeue_target_op = queue.dequeue_many(placeholder_batch_size, name='dequeue_op') # instead of data/target placeholder
with tf.variable_scope('neural_network_model'):
hidden_1_layer = {'weights':tf.Variable(weight_variable([data_cols, n_nodes_hl1])),
'biases':tf.Variable(bias_variable([n_nodes_hl1]))}
output_layer = {'weights':tf.Variable(weight_variable([n_nodes_hl1, n_classes])),
'biases':tf.Variable(bias_variable([n_classes])),}
l1 = tf.add(tf.matmul(dequeue_data_op,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
    # prediction output
prediction = tf.add(tf.matmul(l1,output_layer['weights']), output_layer['biases'], name='output_y')
    # softmax score
score = tf.nn.softmax(prediction, name='score')
with tf.variable_scope('loss'):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=dequeue_target_op)
loss_op = tf.reduce_mean(losses, name='cost')
tf.summary.scalar('loss', loss_op)
with tf.variable_scope('accuracy'):
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(dequeue_target_op, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name='accuracy')
tf.summary.scalar('accuracy', accuracy)
summary_op = tf.summary.merge_all()
train_op = tf.train.AdamOptimizer(0.0001).minimize(loss_op, name='train_op')
saver = tf.train.Saver(max_to_keep=1000)
test_data, test_target =generate_random_train_data(TEST_NUM)
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
if ckpt:
        # get the path to the most recently saved model from the checkpoint file
last_model = ckpt.model_checkpoint_path
print("load {0}".format(last_model))
        # load the trained model
saver.restore(sess, last_model)
LOAD_MODEL = True
else:
print("initialization")
        # initialize variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
writer = tf.summary.FileWriter(SUMMARY_LOG_DIR, sess.graph)
start_time, start_clock = time.time(), time.clock()
# Start a thread to enqueue data asynchronously, and hide I/O latency.
coord = tf.train.Coordinator()
enqueue_thread = threading.Thread(target=load_and_enqueue, args=[sess])
    enqueue_thread.daemon = True  # let the enqueue thread exit together with the main thread
enqueue_thread.start()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    step = 0 # keep the step in a variable so the final step count can be recorded in the model
try:
# check the accuracy before training (without feed_dict!)
print(sess.run(accuracy, feed_dict={placeholder_batch_size:chunk_size})) # check batch_size's data
        # fetch the saved step count
_step = sess.run(variable_step)
print("learned step:{}".format(_step))
for step in range(_step+1, target_step+1):
batch_loss=0
w_summary=None
_, batch_loss, w_summary = sess.run([train_op, loss_op, summary_op],
feed_dict={placeholder_batch_size:batch_size})
if step % 1000 == 0:
if not w_summary is None:
writer.add_summary(w_summary, step)
ac = sess.run(accuracy, feed_dict={placeholder_batch_size:chunk_size}) # check batch_size's data
                # check accuracy on the held-out test data
test_accuracy = accuracy.eval({'queue/dequeue_op:0':test_data,
'queue/dequeue_op:1':test_target})
if step % 10000 == 0:
print("Step:%d accuracy:%.8f test_accuracy:%.8f loss:%.8f time:%.8f clock:%.14f" % (step,ac,test_accuracy,batch_loss,time.time()-start_time,time.clock()-start_clock))
                # save the model every 1,000,000 steps
if step % 1000000 == 0:
_step = sess.run(step_op,feed_dict={placeholder_step:step}) # variable_stepにstepを記録する
saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
sess.run(queue.close(cancel_pending_enqueues=True))
except Exception as e:
# Report exceptions to the coodinator.
print(e)
coord.request_stop(e)
finally:
coord.request_stop()
coord.join(threads)
    # save the model if any training steps were run
if step > _step:
_step = sess.run(step_op,feed_dict={placeholder_step:step}) # variable_stepにstepを記録する
saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
    # generate fresh test data and check accuracy
test_data, test_target =generate_random_train_data(TEST_NUM)
print('Accuracy:',accuracy.eval({dequeue_data_op:test_data,
dequeue_target_op:test_target}))
    # print the total number of steps
print('step:{}'.format(sess.run(variable_step)))
print("end")
|
[
"tensorflow.train.Coordinator",
"tensorflow.reset_default_graph",
"tensorflow.matmul",
"numpy.random.randint",
"tensorflow.Variable",
"tensorflow.truncated_normal",
"sys.path.append",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"os.path.dirname",
"os.path.exists",
"tensorflow.variable_scope",
"time.clock",
"tensorflow.train.start_queue_runners",
"generator.SensorGenerator",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.cast",
"tensorflow.FIFOQueue",
"tensorflow.summary.merge_all",
"tensorflow.train.get_checkpoint_state",
"threading.Thread",
"tensorflow.train.Saver",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"tensorflow.constant",
"numpy.hstack",
"os.makedirs",
"tensorflow.argmax",
"time.time",
"numpy.array",
"tensorflow.train.AdamOptimizer"
] |
[((278, 312), 'sys.path.append', 'sys.path.append', (["(_FILE_DIR + '/..')"], {}), "(_FILE_DIR + '/..')\n", (293, 312), False, 'import sys\n'), ((369, 393), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (391, 393), True, 'import tensorflow as tf\n'), ((776, 793), 'generator.SensorGenerator', 'SensorGenerator', ([], {}), '()\n', (791, 793), False, 'from generator import SensorGenerator\n'), ((5030, 5052), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5050, 5052), True, 'import tensorflow as tf\n'), ((5140, 5172), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1000)'}), '(max_to_keep=1000)\n', (5154, 5172), True, 'import tensorflow as tf\n'), ((153, 178), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'import os\n'), ((464, 489), 'os.path.exists', 'os.path.exists', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (478, 489), False, 'import os\n'), ((495, 517), 'os.makedirs', 'os.makedirs', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (506, 517), False, 'import os\n'), ((1670, 1712), 'numpy.random.randint', 'np.random.randint', (['(0)', '(200)', '[batch_size, 3]'], {}), '(0, 200, [batch_size, 3])\n', (1687, 1712), True, 'import numpy as np\n'), ((1964, 1981), 'numpy.array', 'np.array', (['CSVDATA'], {}), '(CSVDATA)\n', (1972, 1981), True, 'import numpy as np\n'), ((2518, 2556), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (2537, 2556), True, 'import tensorflow as tf\n'), ((2568, 2588), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2579, 2588), True, 'import tensorflow as tf\n'), ((2630, 2659), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (2641, 2659), True, 'import tensorflow as tf\n'), ((2671, 2691), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2682, 2691), True, 'import tensorflow as tf\n'), ((2698, 2724), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input"""'], {}), "('input')\n", (2715, 2724), True, 'import tensorflow as tf\n'), ((2755, 2816), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, data_cols]'], {'name': '"""input_data"""'}), "('float', [None, data_cols], name='input_data')\n", (2769, 2816), True, 'import tensorflow as tf\n'), ((2907, 2951), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'name': '"""input_target"""'}), "('float', name='input_target')\n", (2921, 2951), True, 'import tensorflow as tf\n'), ((3042, 3085), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""batch_size"""'}), "(tf.int32, name='batch_size')\n", (3056, 3085), True, 'import tensorflow as tf\n'), ((3161, 3186), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""step"""'], {}), "('step')\n", (3178, 3186), True, 'import tensorflow as tf\n'), ((3211, 3254), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""input_step"""'}), "(tf.int32, name='input_step')\n", (3225, 3254), True, 'import tensorflow as tf\n'), ((3286, 3327), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'name': '"""step"""'}), "(initial_value=0, name='step')\n", (3297, 3327), True, 'import tensorflow as tf\n'), ((3397, 3423), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""queue"""'], {}), "('queue')\n", (3414, 3423), True, 'import tensorflow as tf\n'), ((3437, 3555), 'tensorflow.FIFOQueue', 'tf.FIFOQueue', ([], {'capacity': 'chunk_size', 
'dtypes': "['float', 'float']", 'shapes': '[[data_cols], [n_classes]]', 'name': '"""FIFOQueue"""'}), "(capacity=chunk_size, dtypes=['float', 'float'], shapes=[[\n data_cols], [n_classes]], name='FIFOQueue')\n", (3449, 3555), True, 'import tensorflow as tf\n'), ((3896, 3937), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""neural_network_model"""'], {}), "('neural_network_model')\n", (3913, 3937), True, 'import tensorflow as tf\n'), ((4366, 4380), 'tensorflow.nn.relu', 'tf.nn.relu', (['l1'], {}), '(l1)\n', (4376, 4380), True, 'import tensorflow as tf\n'), ((4519, 4558), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['prediction'], {'name': '"""score"""'}), "(prediction, name='score')\n", (4532, 4558), True, 'import tensorflow as tf\n'), ((4565, 4590), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (4582, 4590), True, 'import tensorflow as tf\n'), ((4605, 4694), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'prediction', 'labels': 'dequeue_target_op'}), '(logits=prediction, labels=\n dequeue_target_op)\n', (4644, 4694), True, 'import tensorflow as tf\n'), ((4704, 4739), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {'name': '"""cost"""'}), "(losses, name='cost')\n", (4718, 4739), True, 'import tensorflow as tf\n'), ((4744, 4778), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss_op'], {}), "('loss', loss_op)\n", (4761, 4778), True, 'import tensorflow as tf\n'), ((4785, 4814), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (4802, 4814), True, 'import tensorflow as tf\n'), ((4976, 5015), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (4993, 5015), True, 'import tensorflow as tf\n'), ((5239, 5251), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5249, 5251), True, 'import tensorflow as tf\n'), ((5272, 5312), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (5301, 5312), True, 'import tensorflow as tf\n'), ((5704, 5754), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['SUMMARY_LOG_DIR', 'sess.graph'], {}), '(SUMMARY_LOG_DIR, sess.graph)\n', (5725, 5754), True, 'import tensorflow as tf\n'), ((5899, 5921), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (5919, 5921), True, 'import tensorflow as tf\n'), ((5943, 5997), 'threading.Thread', 'threading.Thread', ([], {'target': 'load_and_enqueue', 'args': '[sess]'}), '(target=load_and_enqueue, args=[sess])\n', (5959, 5997), False, 'import threading\n'), ((6069, 6121), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord', 'sess': 'sess'}), '(coord=coord, sess=sess)\n', (6097, 6121), True, 'import tensorflow as tf\n'), ((1873, 1919), 'numpy.hstack', 'np.hstack', (['(sensors[i], GENERATOR_RESULT[0:4])'], {}), '((sensors[i], GENERATOR_RESULT[0:4]))\n', (1882, 1919), True, 'import numpy as np\n'), ((4277, 4330), 'tensorflow.matmul', 'tf.matmul', (['dequeue_data_op', "hidden_1_layer['weights']"], {}), "(dequeue_data_op, hidden_1_layer['weights'])\n", (4286, 4330), True, 'import tensorflow as tf\n'), ((4417, 4455), 'tensorflow.matmul', 'tf.matmul', (['l1', "output_layer['weights']"], {}), "(l1, output_layer['weights'])\n", (4426, 4455), True, 'import tensorflow as tf\n'), ((4839, 4863), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), 
'(prediction, 1)\n', (4848, 4863), True, 'import tensorflow as tf\n'), ((4865, 4896), 'tensorflow.argmax', 'tf.argmax', (['dequeue_target_op', '(1)'], {}), '(dequeue_target_op, 1)\n', (4874, 4896), True, 'import tensorflow as tf\n'), ((4928, 4953), 'tensorflow.cast', 'tf.cast', (['correct', '"""float"""'], {}), "(correct, 'float')\n", (4935, 4953), True, 'import tensorflow as tf\n'), ((5065, 5095), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (5087, 5095), True, 'import tensorflow as tf\n'), ((5630, 5663), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5661, 5663), True, 'import tensorflow as tf\n'), ((5785, 5796), 'time.time', 'time.time', ([], {}), '()\n', (5794, 5796), False, 'import time\n'), ((5798, 5810), 'time.clock', 'time.clock', ([], {}), '()\n', (5808, 5810), False, 'import time\n'), ((7344, 7355), 'time.time', 'time.time', ([], {}), '()\n', (7353, 7355), False, 'import time\n'), ((7367, 7379), 'time.clock', 'time.clock', ([], {}), '()\n', (7377, 7379), False, 'import time\n')]
|
# Generated by Django 2.2.9 on 2020-02-14 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0050_data_migration'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='sites',
field=models.ManyToManyField(blank=True, help_text='The sites this catalog belongs to (in a multi site setup).', to='sites.Site', verbose_name='Sites'),
),
]
|
[
"django.db.models.ManyToManyField"
] |
[((334, 489), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""The sites this catalog belongs to (in a multi site setup)."""', 'to': '"""sites.Site"""', 'verbose_name': '"""Sites"""'}), "(blank=True, help_text=\n 'The sites this catalog belongs to (in a multi site setup).', to=\n 'sites.Site', verbose_name='Sites')\n", (356, 489), False, 'from django.db import migrations, models\n')]
|
import pytest
from copper_sdk import COPPER_API_TOKEN, COPPER_API_EMAIL
from copper_sdk.copper import Copper
@pytest.fixture(scope='session')
def copper():
return Copper(COPPER_API_TOKEN, COPPER_API_EMAIL)
|
[
"copper_sdk.copper.Copper",
"pytest.fixture"
] |
[((111, 142), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (125, 142), False, 'import pytest\n'), ((168, 210), 'copper_sdk.copper.Copper', 'Copper', (['COPPER_API_TOKEN', 'COPPER_API_EMAIL'], {}), '(COPPER_API_TOKEN, COPPER_API_EMAIL)\n', (174, 210), False, 'from copper_sdk.copper import Copper\n')]
|
# Copyright 2021 PaddleFSL Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlefsl.backbones import RCInitVector
vector_initializer = RCInitVector(corpus='glove-wiki', embedding_dim=50)
def get_idx_list_from_words_test():
idx_list = vector_initializer.get_idx_list_from_words('[PAD]')
print(idx_list)
idx_list = vector_initializer.get_idx_list_from_words(['i', 'love', 'you'])
print(idx_list)
def search_tokens_test():
vector = vector_initializer.search_tokens(['i', 'love', 'robin', '[PAD]'])
print(vector)
print(vector.shape)
def rc_init_vector_test():
vector = vector_initializer(
tokens=['yes', 'it', 'is', '*9*', '6$'],
head_position=[0],
tail_position=[2],
max_len=6
)
print(len(vector_initializer))
print(vector)
print(vector.shape)
if __name__ == '__main__':
get_idx_list_from_words_test()
search_tokens_test()
rc_init_vector_test()
|
[
"paddlefsl.backbones.RCInitVector"
] |
[((649, 700), 'paddlefsl.backbones.RCInitVector', 'RCInitVector', ([], {'corpus': '"""glove-wiki"""', 'embedding_dim': '(50)'}), "(corpus='glove-wiki', embedding_dim=50)\n", (661, 700), False, 'from paddlefsl.backbones import RCInitVector\n')]
|
from var_plots import plot_forecast
plot_forecast()
|
[
"var_plots.plot_forecast"
] |
[((37, 52), 'var_plots.plot_forecast', 'plot_forecast', ([], {}), '()\n', (50, 52), False, 'from var_plots import plot_forecast\n')]
|
#!/usr/bin/python3
import argparse
import sys
def readHashFile(hashfile):
f = open(hashfile)
hashes = f.read().split('\n')[:-1]
ntlm ={"cracked":{}, "safe":{}}
f.close()
for i in hashes:
try:
h = i.split(':')
ntlm["safe"][h[3].upper()] = h[0].lower()
except Exception as e:
pass
return hashes, ntlm
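# Check every NT hash from the leak file against the dumped hashes; matching accounts move from "safe" to "cracked".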
def searchLeaked(leakfile, ntlm, verbose):
leak = open(leakfile,"r")
cpt = 0
print("[*] Checking leaked database against hashes (long) ...", file=sys.stderr)
for line in leak:
if line[:-1] in ntlm["safe"]:
ntlm["cracked"][line[:-1]] = ntlm["safe"][line[:-1]]
cpt += 1
del(ntlm["safe"][line[:-1]])
if verbose:
print(line[:-1], ntlm["cracked"][line[:-1]])
print(f"{cpt} compromised", file=sys.stderr)
leak.close()
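# Emit secretsdump-style lines for the compromised accounts, to stdout or to a file, optionally skipping accounts already cracked by john.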
def export(ntlm, john_result_file='', output=''):
john = ''
if john_result_file:
f = open(john_result_file)
john = f.read().lower()
f.close()
if output:
f = open(output, "a+")
cpt = 0
for c in ntlm["cracked"]:
line = f"{ntlm['cracked'][c]}:<LeakTheWeak>:LEAK:NOLM:{c}:::"
if ntlm["cracked"][c] not in john:
if output :
f.write(line+'\n')
else:
print(line)
cpt += 1
if john_result_file:
print(f"New {cpt} compromised")
if output:
f.close()
def main():
parser = argparse.ArgumentParser(description='List accounts compromised in public leaked NTLMs', add_help=True)
parser.add_argument('-w', '--write', action="store", dest="path", default='',
help='A path to store the results. Default is stdout')
parser.add_argument('HASH_FILE', action="store",
help="The result file of impacket-secretsdump")
parser.add_argument('-j', '--john', action="store", dest="john_file", default='',
help="If used, only the accounts not cracked by john are displayed")
parser.add_argument('-v', '--verbose', action="store_true", dest="verbose", default=False,
help="display the cracked accounts in real time")
parser.add_argument('LEAK_FILE', action="store",
help="The wordlist containing the NTLM leaked")
args = parser.parse_args()
hashes, ntlm = readHashFile(args.HASH_FILE)
searchLeaked(args.LEAK_FILE, ntlm, args.verbose)
export(ntlm, args.john_file, args.path)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser"
] |
[((1512, 1619), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""List accounts compromised in public leaked NTLMs"""', 'add_help': '(True)'}), "(description=\n 'List accounts compromised in public leaked NTLMs', add_help=True)\n", (1535, 1619), False, 'import argparse\n')]
|
import os
import bpy
import sys
# Names of folder and files
args = sys.argv
source_file = args[-2]
convert_file = args[-1]
save_type = convert_file.split(".")[-1]
# Deleting all objects
for scene in bpy.data.scenes:
for obj in scene.objects:
scene.objects.unlink(obj)
for bpy_data_iter in (
bpy.data.objects,
bpy.data.meshes,
bpy.data.lamps,
bpy.data.cameras,
):
for id_data in bpy_data_iter:
bpy_data_iter.remove(id_data)
bpy.ops.object.select_by_type(type = "MESH")
bpy.ops.object.delete(use_global=False)
for item in bpy.data.meshes:
for scene in bpy.data.scenes:
for obj in scene.objects:
scene.objects.unlink(obj)
item.user_clear()
bpy.data.meshes.remove(item)
print("Scene cleared")
# Open model and save
try:
try:
print("Try to use plugin...")
bpy.ops.import_scene.deusexmd(filepath=source_file)
print("Success")
except:
try:
print("Fail")
print("Try to use outer script...")
try:
import import_DeusExMD
except:
print("Fail to import")
exit(2)
print("Successful module import; try to open model...")
import_DeusExMD.import_DeusExMD(source_file, #filepath
bpy.context, #context
False, #randomize_colors
True, #import_vertcolors
False, #skip_blank
False, #use_layers
1.0) #mesh_scale
print("Success")
except:
print("Fail")
exit(1)
print("\nModel opened\n")
if save_type == "obj":
bpy.ops.export_scene.obj(filepath=convert_file)
elif save_type == "fbx":
bpy.ops.export_scene.fbx(filepath=convert_file)
elif save_type == "3ds":
bpy.ops.export_scene.autodesk_3ds(filepath=convert_file)
elif save_type == "stl":
bpy.ops.export_mesh.stl(filepath=convert_file,
check_existing=False,
ascii=False)
else:
print("Incorrect save format")
print("\nConvertions done!")
exit(0)
# In case of error
except Exception:
print("\nSome errors here")
exit(1)
|
[
"bpy.ops.import_scene.deusexmd",
"bpy.data.meshes.remove",
"import_DeusExMD.import_DeusExMD",
"bpy.ops.object.delete",
"bpy.ops.export_mesh.stl",
"bpy.ops.export_scene.fbx",
"bpy.ops.object.select_by_type",
"bpy.ops.export_scene.obj",
"bpy.ops.export_scene.autodesk_3ds"
] |
[((513, 555), 'bpy.ops.object.select_by_type', 'bpy.ops.object.select_by_type', ([], {'type': '"""MESH"""'}), "(type='MESH')\n", (542, 555), False, 'import bpy\n'), ((559, 598), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {'use_global': '(False)'}), '(use_global=False)\n', (580, 598), False, 'import bpy\n'), ((770, 798), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['item'], {}), '(item)\n', (792, 798), False, 'import bpy\n'), ((912, 963), 'bpy.ops.import_scene.deusexmd', 'bpy.ops.import_scene.deusexmd', ([], {'filepath': 'source_file'}), '(filepath=source_file)\n', (941, 963), False, 'import bpy\n'), ((1913, 1960), 'bpy.ops.export_scene.obj', 'bpy.ops.export_scene.obj', ([], {'filepath': 'convert_file'}), '(filepath=convert_file)\n', (1937, 1960), False, 'import bpy\n'), ((2000, 2047), 'bpy.ops.export_scene.fbx', 'bpy.ops.export_scene.fbx', ([], {'filepath': 'convert_file'}), '(filepath=convert_file)\n', (2024, 2047), False, 'import bpy\n'), ((1320, 1414), 'import_DeusExMD.import_DeusExMD', 'import_DeusExMD.import_DeusExMD', (['source_file', 'bpy.context', '(False)', '(True)', '(False)', '(False)', '(1.0)'], {}), '(source_file, bpy.context, False, True, \n False, False, 1.0)\n', (1351, 1414), False, 'import import_DeusExMD\n'), ((2087, 2143), 'bpy.ops.export_scene.autodesk_3ds', 'bpy.ops.export_scene.autodesk_3ds', ([], {'filepath': 'convert_file'}), '(filepath=convert_file)\n', (2120, 2143), False, 'import bpy\n'), ((2183, 2269), 'bpy.ops.export_mesh.stl', 'bpy.ops.export_mesh.stl', ([], {'filepath': 'convert_file', 'check_existing': '(False)', 'ascii': '(False)'}), '(filepath=convert_file, check_existing=False, ascii=\n False)\n', (2206, 2269), False, 'import bpy\n')]
|
#
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from abc import ABC
import logging
import os
from os.path import abspath, dirname, join
import sys
import unittest
import torch
import random
import numpy as np
import pandas as pd
from merlion.models.defaults import DefaultDetector, DefaultDetectorConfig
from merlion.plot import plot_anoms_plotly
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import TimeSeries
from ts_datasets.anomaly import *
rootdir = dirname(dirname(dirname(abspath(__file__))))
logger = logging.getLogger(__name__)
def set_random_seeds():
torch.manual_seed(12345)
random.seed(12345)
np.random.seed(12345)
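# Split a dataset dataframe into train/test parts using the metadata trainval flag, keeping the last n training rows and the first n test rows.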
def get_train_test_splits(df: pd.DataFrame, metadata: pd.DataFrame, n: int) -> (pd.DataFrame, pd.DataFrame, np.ndarray):
train_df = df[metadata.trainval]
test_df = df[~metadata.trainval]
test_labels = pd.DataFrame(metadata[~metadata.trainval].anomaly)
return train_df.tail(n), test_df.head(n), test_labels[:n]
class Mixin(ABC):
def test_score(self):
print("-" * 80)
logger.info("test_score\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
test_ts = TimeSeries.from_pd(self.test_df)
score_ts = self.model.get_anomaly_score(test_ts)
scores = score_ts.to_pd().values.flatten()
min_score, max_score, sum_score = min(scores), max(scores), sum(scores)
logger.info(f"scores look like: {scores[:10]}")
logger.info(f"min score = {min_score}")
logger.info(f"max score = {max_score}")
logger.info(f"sum score = {sum_score}")
def test_save_load(self):
print("-" * 80)
logger.info("test_save_load\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
path = join(rootdir, "tmp", "default", "anom", "multi" if multi else "uni")
self.model.save(dirname=path)
loaded_model = DefaultDetector.load(dirname=path)
test_ts = TimeSeries.from_pd(self.test_df)
scores = self.model.get_anomaly_score(test_ts)
scores_np = scores.to_pd().values.flatten()
loaded_model_scores = loaded_model.get_anomaly_score(test_ts)
loaded_model_scores = loaded_model_scores.to_pd().values.flatten()
self.assertEqual(len(scores_np), len(loaded_model_scores))
alarms = self.model.post_rule(scores)
loaded_model_alarms = loaded_model.post_rule(scores)
self.assertSequenceEqual(list(alarms), list(loaded_model_alarms))
def test_plot(self):
try:
import plotly
print("-" * 80)
logger.info("test_plot\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
savedir = join(rootdir, "tmp", "default", "anom")
os.makedirs(savedir, exist_ok=True)
path = join(savedir, ("multi" if multi else "uni") + ".png")
test_ts = TimeSeries.from_pd(self.test_df)
fig = self.model.plot_anomaly_plotly(
time_series=test_ts, time_series_prev=train_ts, plot_time_series_prev=True
)
plot_anoms_plotly(fig, TimeSeries.from_pd(self.test_labels))
try:
import kaleido
fig.write_image(path, engine="kaleido")
except ImportError:
logger.info("kaleido not installed, not trying to save image")
except ImportError:
logger.info("plotly not installed, skipping test case")
class TestUnivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(
DefaultDetectorConfig(granularity="1h", threshold=AggregateAlarms(alm_threshold=1.5))
)
# Time series with anomalies in both train split and test split
df = pd.read_csv(join(rootdir, "data", "synthetic_anomaly", "horizontal_spike_anomaly.csv"))
df.timestamp = pd.to_datetime(df.timestamp, unit="s")
df = df.set_index("timestamp")
# Get training & testing splits
self.train_df = df.iloc[: -len(df) // 2, :1]
self.test_df = df.iloc[-len(df) // 2 :, :1]
self.test_labels = df.iloc[-len(df) // 2 :, -1:]
class TestMultivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(DefaultDetectorConfig(threshold=AggregateAlarms(alm_threshold=2)))
self.dataset = MSL(rootdir=join(rootdir, "data", "smap"))
df, metadata = self.dataset[0]
self.train_df, self.test_df, self.test_labels = get_train_test_splits(df, metadata, 2000)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.INFO
)
unittest.main()
|
[
"pandas.DataFrame",
"unittest.main",
"os.path.abspath",
"numpy.random.seed",
"os.makedirs",
"logging.basicConfig",
"torch.manual_seed",
"merlion.utils.TimeSeries.from_pd",
"merlion.models.defaults.DefaultDetector.load",
"merlion.post_process.threshold.AggregateAlarms",
"random.seed",
"pandas.to_datetime",
"os.path.join",
"logging.getLogger"
] |
[((716, 743), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (733, 743), False, 'import logging\n'), ((774, 798), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (791, 798), False, 'import torch\n'), ((803, 821), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (814, 821), False, 'import random\n'), ((826, 847), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (840, 847), True, 'import numpy as np\n'), ((1063, 1113), 'pandas.DataFrame', 'pd.DataFrame', (['metadata[~metadata.trainval].anomaly'], {}), '(metadata[~metadata.trainval].anomaly)\n', (1075, 1113), True, 'import pandas as pd\n'), ((5241, 5381), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'stream': 'sys.stdout', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n stream=sys.stdout, level=logging.INFO)\n", (5260, 5381), False, 'import logging\n'), ((5391, 5406), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5404, 5406), False, 'import unittest\n'), ((1387, 1420), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.train_df'], {}), '(self.train_df)\n', (1405, 1420), False, 'from merlion.utils import TimeSeries\n'), ((1475, 1507), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.test_df'], {}), '(self.test_df)\n', (1493, 1507), False, 'from merlion.utils import TimeSeries\n'), ((2097, 2130), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.train_df'], {}), '(self.train_df)\n', (2115, 2130), False, 'from merlion.utils import TimeSeries\n'), ((2215, 2283), 'os.path.join', 'join', (['rootdir', '"""tmp"""', '"""default"""', '"""anom"""', "('multi' if multi else 'uni')"], {}), "(rootdir, 'tmp', 'default', 'anom', 'multi' if multi else 'uni')\n", (2219, 2283), False, 'from os.path import abspath, dirname, join\n'), ((2345, 2379), 'merlion.models.defaults.DefaultDetector.load', 'DefaultDetector.load', ([], {'dirname': 'path'}), '(dirname=path)\n', (2365, 2379), False, 'from merlion.models.defaults import DefaultDetector, DefaultDetectorConfig\n'), ((2399, 2431), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.test_df'], {}), '(self.test_df)\n', (2417, 2431), False, 'from merlion.utils import TimeSeries\n'), ((4517, 4555), 'pandas.to_datetime', 'pd.to_datetime', (['df.timestamp'], {'unit': '"""s"""'}), "(df.timestamp, unit='s')\n", (4531, 4555), True, 'import pandas as pd\n'), ((686, 703), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (693, 703), False, 'from os.path import abspath, dirname, join\n'), ((3182, 3215), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.train_df'], {}), '(self.train_df)\n', (3200, 3215), False, 'from merlion.utils import TimeSeries\n'), ((3315, 3354), 'os.path.join', 'join', (['rootdir', '"""tmp"""', '"""default"""', '"""anom"""'], {}), "(rootdir, 'tmp', 'default', 'anom')\n", (3319, 3354), False, 'from os.path import abspath, dirname, join\n'), ((3367, 3402), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (3378, 3402), False, 'import os\n'), ((3422, 3475), 'os.path.join', 'join', (['savedir', "(('multi' if multi else 'uni') + '.png')"], {}), "(savedir, ('multi' if multi else 'uni') + '.png')\n", (3426, 3475), False, 'from os.path import abspath, dirname, join\n'), ((3499, 3531), 'merlion.utils.TimeSeries.from_pd', 
'TimeSeries.from_pd', (['self.test_df'], {}), '(self.test_df)\n', (3517, 3531), False, 'from merlion.utils import TimeSeries\n'), ((4418, 4492), 'os.path.join', 'join', (['rootdir', '"""data"""', '"""synthetic_anomaly"""', '"""horizontal_spike_anomaly.csv"""'], {}), "(rootdir, 'data', 'synthetic_anomaly', 'horizontal_spike_anomaly.csv')\n", (4422, 4492), False, 'from os.path import abspath, dirname, join\n'), ((3722, 3758), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['self.test_labels'], {}), '(self.test_labels)\n', (3740, 3758), False, 'from merlion.utils import TimeSeries\n'), ((5040, 5069), 'os.path.join', 'join', (['rootdir', '"""data"""', '"""smap"""'], {}), "(rootdir, 'data', 'smap')\n", (5044, 5069), False, 'from os.path import abspath, dirname, join\n'), ((4274, 4308), 'merlion.post_process.threshold.AggregateAlarms', 'AggregateAlarms', ([], {'alm_threshold': '(1.5)'}), '(alm_threshold=1.5)\n', (4289, 4308), False, 'from merlion.post_process.threshold import AggregateAlarms\n'), ((4970, 5002), 'merlion.post_process.threshold.AggregateAlarms', 'AggregateAlarms', ([], {'alm_threshold': '(2)'}), '(alm_threshold=2)\n', (4985, 5002), False, 'from merlion.post_process.threshold import AggregateAlarms\n')]
|
## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
# Image size = 224*224 ->
self.conv1 = nn.Conv2d(1, 32, 5) # (224 - 5)/1 + 1 = 220 -> (32, 220, 220)
self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2) # halves spatial dims: (32, 220, 220) -> (32, 110, 110)
self.drop1 = nn.Dropout(p=0.2)
        self.conv2 = nn.Conv2d(32, 64, 4) # (110 - 4)/1 + 1 = 107 -> (64, 107, 107)
self.drop2 = nn.Dropout(p=0.2) # after pooling -> (64, 53, 53)
        self.conv3 = nn.Conv2d(64, 128, 3) # (53 - 3)/1 + 1 = 51 -> (128, 51, 51)
self.drop3 = nn.Dropout(p=0.2) # after pooling -> (128, 25, 25)
self.dense1 = nn.Linear(80000,1000) # 128*25*25 = 80000
self.drop4 = nn.Dropout(p=0.2)
self.dense2 = nn.Linear(1000,500)
self.drop5 = nn.Dropout(p=0.2)
self.dense3 = nn.Linear(500,136)
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
x = self.drop1(self.pool(self.act(self.conv1(x))))
x = self.drop2(self.pool(self.act(self.conv2(x))))
x = self.drop3(self.pool(self.act(self.conv3(x))))
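        # flatten the pooled feature maps into one vector per sample (128*25*25 = 80000 values)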
x = x.view(x.size(0), -1)
x = self.drop4(self.act(self.dense1(x)))
x = self.drop5(self.act(self.dense2(x)))
out = self.dense3(x)
return out
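# Minimal usage sketch (hedged; assumes a 224x224 grayscale input, matching the
# size notes above, and illustrative names `net`/`out`):
#
#   net = Net()
#   with torch.no_grad():
#       out = net(torch.randn(1, 1, 224, 224))  # out.shape == torch.Size([1, 136])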
|
[
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
] |
[((1013, 1032), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)', '(5)'], {}), '(1, 32, 5)\n', (1022, 1032), True, 'import torch.nn as nn\n'), ((1094, 1103), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1101, 1103), True, 'import torch.nn as nn\n'), ((1124, 1142), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1136, 1142), True, 'import torch.nn as nn\n'), ((1187, 1204), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1197, 1204), True, 'import torch.nn as nn\n'), ((1235, 1255), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(4)'], {}), '(32, 64, 4)\n', (1244, 1255), True, 'import torch.nn as nn\n'), ((1318, 1335), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1328, 1335), True, 'import torch.nn as nn\n'), ((1398, 1419), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)'], {}), '(64, 128, 3)\n', (1407, 1419), True, 'import torch.nn as nn\n'), ((1480, 1497), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1490, 1497), True, 'import torch.nn as nn\n'), ((1562, 1584), 'torch.nn.Linear', 'nn.Linear', (['(80000)', '(1000)'], {}), '(80000, 1000)\n', (1571, 1584), True, 'import torch.nn as nn\n'), ((1625, 1642), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1635, 1642), True, 'import torch.nn as nn\n'), ((1674, 1694), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(500)'], {}), '(1000, 500)\n', (1683, 1694), True, 'import torch.nn as nn\n'), ((1715, 1732), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1725, 1732), True, 'import torch.nn as nn\n'), ((1764, 1783), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(136)'], {}), '(500, 136)\n', (1773, 1783), True, 'import torch.nn as nn\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Support for resource tree traversal.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from pyramid import traversal
from pyramid.compat import is_nonstr_iter
from pyramid.compat import decode_path_info
from pyramid.exceptions import URLDecodeError
from pyramid.httpexceptions import HTTPNotFound
from pyramid.interfaces import VH_ROOT_KEY
from pyramid.interfaces import ITraverser
from zope import interface
from zope.component import queryMultiAdapter
from zope.event import notify
from zope.location.interfaces import LocationError
from zope.traversing import api as ztraversing
from zope.traversing.interfaces import ITraversable
from zope.traversing.interfaces import BeforeTraverseEvent
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from zope.traversing.namespace import resource as _zresource
lineage = traversal.lineage
find_interface = traversal.find_interface
empty = traversal.empty
split_path_info = traversal.split_path_info
logger = __import__('logging').getLogger(__name__)
__all__ = [
'ZopeResourceTreeTraverser',
'resource',
]
def _notify_before_traverse_event(ob, request):
"""
Notifies a BeforeTraverseEvent, but safely: if the
handlers themselves raise a location error, turn that into
a HTTP 404 exception.
Because handlers are deliberately doing this, we stop
traversal and abort rather than try to return an information
dictionary and find a view and context, etc. This is limiting, but
safe.
"""
try:
notify(BeforeTraverseEvent(ob, request))
except LocationError:
# this is often a setup or programmer error
logger.debug("LocationError from traverse subscribers", exc_info=True)
raise HTTPNotFound("Traversal failed")
@interface.implementer(ITraverser)
class ZopeResourceTreeTraverser(traversal.ResourceTreeTraverser):
"""
A :class:`pyramid.interfaces.ITraverser` based on pyramid's
default traverser, but modified to use the
:mod:`zope.traversing.api` machinery instead of (only) dictionary
lookups. This provides is with the flexibility of the
:obj:`zope.traversing.interfaces.ITraversable` adapter pattern,
plus the support of namespace lookups
(:func:`zope.traversing.namespace.nsParse` and
:func:`zope.traversing.namespace.namespaceLookup`).
As this object traverses, it fires :obj:`~.IBeforeTraverseEvent`
events. If you either load the configuration from
:mod:`zope.app.publication` or manually enable the
:obj:`zope.site.site.threadSiteSubscriber <zope.site.site>` to
subscribe to this event, then any Zope site managers found along
the way will be made the current site.
"""
def __init__(self, root):
traversal.ResourceTreeTraverser.__init__(self, root)
def __call__(self, request): # pylint:disable=too-many-locals,too-many-branches,too-many-statements
"""
        See :meth:`pyramid.interfaces.ITraverser.__call__`.
"""
# JAM: Unfortunately, the superclass implementation is entirely monolithic
        # and so we cannot reuse any part of it. Instead,
# we copy-and-paste it. Unless otherwise noted, comments below are
# original.
# JAM: Note the abundance of no covers. These are for features we are
# not currently using and the code is lifted directly from pyramid.
environ = request.environ
if request.matchdict is not None:
matchdict = request.matchdict
path = matchdict.get('traverse', '/') or '/'
if is_nonstr_iter(path):
# this is a *traverse stararg (not a {traverse})
# routing has already decoded these elements, so we just
# need to join them
path = '/'.join(path) or '/'
subpath = matchdict.get('subpath', ())
if not is_nonstr_iter(subpath): # pragma: no cover
# this is not a *subpath stararg (just a {subpath})
# routing has already decoded this string, so we just need
# to split it
subpath = split_path_info(subpath)
else: # pragma: no cover
# this request did not match a route
subpath = ()
try:
# empty if mounted under a path in mod_wsgi, for example
path = decode_path_info(environ['PATH_INFO'] or '/')
except KeyError:
path = '/'
except UnicodeDecodeError as e:
raise URLDecodeError(e.encoding, e.object, e.start, e.end,
e.reason)
if VH_ROOT_KEY in environ: # pragma: no cover
# HTTP_X_VHM_ROOT
vroot_path = decode_path_info(environ[VH_ROOT_KEY])
vroot_tuple = split_path_info(vroot_path)
# both will (must) be unicode or asciistr
vpath = vroot_path + path
vroot_idx = len(vroot_tuple) - 1
else:
vroot_tuple = ()
vpath = path
vroot_idx = -1
root = self.root
ob = vroot = root
if vpath == '/': # invariant: vpath must not be empty
# prevent a call to traversal_path if we know it's going
# to return the empty tuple
vpath_tuple = ()
else:
i = 0
view_selector = self.VIEW_SELECTOR
# A list so that remaining_path can be modified
vpath_tuple = list(split_path_info(vpath))
for segment in vpath_tuple:
# JAM: Fire traversal events, mainly so sites get installed. See
# zope.publisher.base.
_notify_before_traverse_event(ob, request)
# JAM: Notice that checking for '@@' is special cased, and
# doesn't go through the normal namespace lookup as it would in
# plain zope traversal. (XXX: Why not?)
if segment.startswith(view_selector): # pragma: no cover
return {'context': ob,
'view_name': segment[2:],
'subpath': vpath_tuple[i + 1:],
'traversed': vpath_tuple[:vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root}
try:
# JAM: This is where we differ. instead of using __getitem__,
# we use the traversing machinery.
# The zope app would use IPublishTraverser, which
# would install security proxies along the way. We probably don't need to
# do that? TODO:
# NOTE: By passing the request here, we require all traversers
# (including the namespace traversers) to be registered as multi-adapters.
# None of the default namespaces are. See our
# configure.zcml for what is.
# JAM: Damn stupid implementation of traversePathElement ignores
# the request argument to find a traversable /except/ when a namespace is found.
# therefore, we explicitly query for the multi adapter ourself in the non-namespace case
# (In the namespace case, we let traversing handle it, because it needs a named adapter
# after parsing)
traversable = None
if segment and segment[0] not in '+@' \
and not ITraversable.providedBy(ob):
try:
# Use the installed component registry
# instead of the request registry (which
# is the global component registry if
# pyramid was configured that way, or a
# standalone registry) in case the act of
# traversing has changed the site manager;
# zope.site.site.threadSiteSubscriber will
# do this for each BeforeTraverseEvent
# that's fired (though that's not
# registered by default).
traversable = queryMultiAdapter((ob, request),
ITraversable)
except TypeError:
# Some things are registered for "*" (DefaultTraversable)
# which means they get called here. If they can't take
# two arguments, then we bail. Sucks.
pass
remaining_path = vpath_tuple[i + 1:]
next_ob = ztraversing.traversePathElement(ob,
segment,
remaining_path,
traversable=traversable,
request=request)
if remaining_path != vpath_tuple[i + 1:]:
# Is this if check necessary? It would be faster to
# always assign
vpath_tuple[i + 1:] = remaining_path
except LocationError:
# LocationError is a type of KeyError. The DefaultTraversable turns
# plain KeyError and TypeErrors into LocationError.
return {'context': ob,
'view_name': segment,
'subpath': vpath_tuple[i + 1:],
'traversed': vpath_tuple[:vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root}
if i == vroot_idx: # pragma: no cover
vroot = next_ob
ob = next_ob
i += 1
# JAM: Also fire before traversal for the actual context item, since we
# won't actually traverse into it. Be sure not to fire multiple times
# for this (E.g., the root). This logic is complicated by the
# multi-returns above.
_notify_before_traverse_event(ob, request)
return {'context': ob,
'view_name': empty,
'subpath': subpath,
'traversed': vpath_tuple,
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root}
class resource(_zresource):
"""
Handles resource lookup in a way compatible with :mod:`zope.browserresource`.
This package registers resources as named adapters from :class:`.IDefaultBrowserLayer`
to Interface. We connect the two by making the pyramid request implement
the right thing.
"""
def __init__(self, context, request):
request = IBrowserRequest(request)
if not IDefaultBrowserLayer.providedBy(request):
interface.alsoProvides(request, IDefaultBrowserLayer) # We lie
super(resource, self).__init__(context, request)
|
[
"pyramid.traversal.ResourceTreeTraverser.__init__",
"zope.traversing.interfaces.ITraversable.providedBy",
"zope.traversing.interfaces.BeforeTraverseEvent",
"zope.publisher.interfaces.browser.IBrowserRequest",
"pyramid.compat.decode_path_info",
"zope.publisher.interfaces.browser.IDefaultBrowserLayer.providedBy",
"zope.component.queryMultiAdapter",
"zope.interface.implementer",
"pyramid.compat.is_nonstr_iter",
"pyramid.exceptions.URLDecodeError",
"zope.traversing.api.traversePathElement",
"pyramid.httpexceptions.HTTPNotFound",
"zope.interface.alsoProvides"
] |
[((1946, 1979), 'zope.interface.implementer', 'interface.implementer', (['ITraverser'], {}), '(ITraverser)\n', (1967, 1979), False, 'from zope import interface\n'), ((2915, 2967), 'pyramid.traversal.ResourceTreeTraverser.__init__', 'traversal.ResourceTreeTraverser.__init__', (['self', 'root'], {}), '(self, root)\n', (2955, 2967), False, 'from pyramid import traversal\n'), ((11356, 11380), 'zope.publisher.interfaces.browser.IBrowserRequest', 'IBrowserRequest', (['request'], {}), '(request)\n', (11371, 11380), False, 'from zope.publisher.interfaces.browser import IBrowserRequest\n'), ((1705, 1737), 'zope.traversing.interfaces.BeforeTraverseEvent', 'BeforeTraverseEvent', (['ob', 'request'], {}), '(ob, request)\n', (1724, 1737), False, 'from zope.traversing.interfaces import BeforeTraverseEvent\n'), ((1910, 1942), 'pyramid.httpexceptions.HTTPNotFound', 'HTTPNotFound', (['"""Traversal failed"""'], {}), "('Traversal failed')\n", (1922, 1942), False, 'from pyramid.httpexceptions import HTTPNotFound\n'), ((3743, 3763), 'pyramid.compat.is_nonstr_iter', 'is_nonstr_iter', (['path'], {}), '(path)\n', (3757, 3763), False, 'from pyramid.compat import is_nonstr_iter\n'), ((4925, 4963), 'pyramid.compat.decode_path_info', 'decode_path_info', (['environ[VH_ROOT_KEY]'], {}), '(environ[VH_ROOT_KEY])\n', (4941, 4963), False, 'from pyramid.compat import decode_path_info\n'), ((11396, 11436), 'zope.publisher.interfaces.browser.IDefaultBrowserLayer.providedBy', 'IDefaultBrowserLayer.providedBy', (['request'], {}), '(request)\n', (11427, 11436), False, 'from zope.publisher.interfaces.browser import IDefaultBrowserLayer\n'), ((11450, 11503), 'zope.interface.alsoProvides', 'interface.alsoProvides', (['request', 'IDefaultBrowserLayer'], {}), '(request, IDefaultBrowserLayer)\n', (11472, 11503), False, 'from zope import interface\n'), ((4055, 4078), 'pyramid.compat.is_nonstr_iter', 'is_nonstr_iter', (['subpath'], {}), '(subpath)\n', (4069, 4078), False, 'from pyramid.compat import is_nonstr_iter\n'), ((4546, 4591), 'pyramid.compat.decode_path_info', 'decode_path_info', (["(environ['PATH_INFO'] or '/')"], {}), "(environ['PATH_INFO'] or '/')\n", (4562, 4591), False, 'from pyramid.compat import decode_path_info\n'), ((4714, 4776), 'pyramid.exceptions.URLDecodeError', 'URLDecodeError', (['e.encoding', 'e.object', 'e.start', 'e.end', 'e.reason'], {}), '(e.encoding, e.object, e.start, e.end, e.reason)\n', (4728, 4776), False, 'from pyramid.exceptions import URLDecodeError\n'), ((9088, 9195), 'zope.traversing.api.traversePathElement', 'ztraversing.traversePathElement', (['ob', 'segment', 'remaining_path'], {'traversable': 'traversable', 'request': 'request'}), '(ob, segment, remaining_path, traversable=\n traversable, request=request)\n', (9119, 9195), True, 'from zope.traversing import api as ztraversing\n'), ((7818, 7845), 'zope.traversing.interfaces.ITraversable.providedBy', 'ITraversable.providedBy', (['ob'], {}), '(ob)\n', (7841, 7845), False, 'from zope.traversing.interfaces import ITraversable\n'), ((8583, 8629), 'zope.component.queryMultiAdapter', 'queryMultiAdapter', (['(ob, request)', 'ITraversable'], {}), '((ob, request), ITraversable)\n', (8600, 8629), False, 'from zope.component import queryMultiAdapter\n')]
|
import json
import os
from os import environ as env
from distutils.util import strtobool
if os.path.isfile("setting.json"):
with open("setting.json", "r", encoding="UTF-8_sig") as s:
setting = json.load(s)
else:
setting = {
"token": {
"discord": env["discord_token"],
"chunirec": env["chunirec_token"]
},
"logging": {
"logging": strtobool(env["logging"]),
"loglevel_stdio": env["loglevel_stdio"],
"loglevel_file": env["loglevel_file"],
"log_filename": env["log_filename"]
},
"misc": {
"channel_id": int(env["channel_id"]),
"timezone": env["timezone"],
"api_lifetime": int(env["api_lifetime"]),
"max_musics": int(env["max_musics"]),
"command_prefix": env["command_prefix"]
}
}
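# Settings are read from setting.json when present, otherwise from environment variables.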
# URL
URL_chunirec = "https://reiwa.f5.si/chunirec_all.json"
URL_ONGEKI = "https://ongeki.sega.jp/assets/json/music/music.json"
URL_MAIMAI = "https://maimai.sega.jp/data/DXsongs.json"
URL_WACCA = "https://reiwa.f5.si/wacca_all.json"
URL_ARCAEA = "https://reiwa.f5.si/arcaea_all.json"
URL_PHIGROS = "https://reiwa.f5.si/phigros_all.json"
# Tokens
CHUNIREC_TOKEN = setting["token"]["chunirec"]
DISCORD_TOKEN = setting["token"]["discord"]
# API settings
API_LIFETIME = int(setting["misc"]["api_lifetime"])
# Logger settings
tz = setting["misc"]["timezone"]
is_logging = setting["logging"]["logging"]
loglevel_stdio = setting["logging"]["loglevel_stdio"]
loglevel_file = setting["logging"]["loglevel_file"]
log_filename = setting["logging"]["log_filename"]
# Miscellaneous
MAX_MUSICS = setting["misc"]["max_musics"]
CMDPREF = setting["misc"]["command_prefix"]
APP_VERSION = "3.0"
CHANNEL_NAME = "選曲bot"
# Help messages
DECLARATION_NAME = f"**CHUNITHM Random Selector bot v{APP_VERSION}**"
HIGHLOW = f"""※`(:high/low)`がついているパラメータは、後ろに『:high』もしくは『:low』を付け足すことで『以上』『以下』を表すことができます。
`:up/:down`や`:big/:small`でも可能です。"""
HELPMES_CHUNITHM = f"""
{DECLARATION_NAME}
【コマンド文字列】
`{CMDPREF}random [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [ノーツ数(:high/low)] [BPM(:high/low)] [難易度]`
`{CMDPREF}search [レベル(:high/low)] [ジャンル] [アーティスト] [ノーツ数(:high/low)] [BPM(:high/low)] [難易度]`
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式、もしくは『12.6』『13.7』のような譜面定数形式で入力してください。
**ジャンル**
> 楽曲が属するジャンルを指定します。
> 『ORIGINAL』『POPS&ANIME』『niconico』『東方Project』『VARIETY』『イロドリミドリ』『ゲキマイ』から1つ選んで入力してください。
**アーティスト**
> 楽曲のアーティスト名を指定します。
> アーティスト名を入力してください。
> 大文字・小文字および全角・半角を考慮しない部分一致で検索されます。
**ノーツ数**
> 楽曲のノーツ数を指定します。
> 半角数字で入力してください。
**BPM**
> 楽曲のBPMを指定します。
> 半角数字で入力してください。
**難易度**
> 楽曲の難易度を指定します。EXPERTのみもしくはMASTERのみの検索をする場合に使用します。
> 指定する場合、『exp』もしくは『mas』と指定してください。
> 指定されないか、不正な値を指定した場合は自動的にEXPERTとMASTERの両方から検索します。
> レベルもしくはノーツ数が指定されたときのみ機能します。
【コマンド例】
`{CMDPREF}random`: 全楽曲の中からランダムに3曲選びます。
`{CMDPREF}random 5 13+:up`: レベル13+以上の楽曲の中からランダムに5曲選びます。
`{CMDPREF}random - 13 - - - - exp`: レベル13のEXPERTの楽曲をランダムに3曲選びます。
`{CMDPREF}search none 東方Project none 1000:low`: 東方Projectの楽曲の中からノーツ数が1000以下の楽曲を検索します。
`{CMDPREF}search - - - - 300:high`: 全楽曲の中からBPM300以上の楽曲を検索します。
【注意点】
- ジャンルは1つのみ指定可能です。
- WORLD'S ENDには対応していません。
- 一部の値が未登録になっている場合があります。
他、以下の楽曲の検索機能があります。
- オンゲキ: `{CMDPREF}help_ongeki`
- WACCA: `{CMDPREF}help_wacca`
"""
HELPMES_ONGEKI = f"""
{DECLARATION_NAME}
**オンゲキ選曲機能**
【コマンド文字列】
`{CMDPREF}random_ongeki [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}rgeki`でも可)
`{CMDPREF}search_ongeki [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}sgeki`でも可)
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式で入力してください。
> 譜面定数は使えません。
**ジャンル**
> 楽曲が属するジャンルを指定します。
> 『オンゲキ』『POPS&ANIME』『niconico』『東方Project』『VARIETY』『チュウマイ』から1つ選んで入力してください。
**アーティスト**
> 楽曲のアーティスト名を指定します。
> アーティスト名を入力してください。
> 大文字・小文字および全角・半角を考慮しない部分一致で検索されます。
**難易度**
> 楽曲の難易度を指定します。EXPERTのみもしくはMASTERのみの検索をする場合に使用します。
> 指定する場合、『exp』もしくは『mas』と指定してください。
> 指定されないか、不正な値を指定した場合は自動的にEXPERTとMASTERの両方から検索します。
> レベルが指定されたときのみ機能します。
【コマンド例】
`{CMDPREF}random_ongeki`: 全楽曲の中からランダムに3曲選びます。
`{CMDPREF}random_ongeki 5 13+:up`: レベル13+以上の楽曲の中からランダムに5曲選びます。
`{CMDPREF}random_ongeki - 13 - - - - exp`: レベル13のEXPERTの楽曲をランダムに3曲選びます。
`{CMDPREF}search_ongeki 14 東方Project`: 東方Projectの楽曲の中からレベル14の曲を検索します。
【注意点】
- ジャンルは1つのみ指定可能です。
- LUNATICおよびボーナストラックには対応していません。
"""
# Work in progress
HELPMES_MAIMAI = f"""
{DECLARATION_NAME}
**maimaiでらっくす選曲機能**
【コマンド文字列】
`{CMDPREF}random_maimai [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}rmai`でも可)
`{CMDPREF}search_maimai [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}smai`でも可)
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式で入力してください。
> 譜面定数は使えません。
**ジャンル**
> 楽曲が属するジャンルを指定します。
"""
HELPMES_WACCA = f"""
{DECLARATION_NAME}
**WACCA選曲機能**
【コマンド文字列】
`{CMDPREF}random_wacca [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}rwacca`でも可)
`{CMDPREF}search_wacca [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}swacca`でも可)
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式で入力してください。
> 譜面定数は使えません。
**ジャンル**
> 楽曲が属するジャンルを指定します。
> 『アニメ/POP』『ボカロ』『東方アレンジ』『2.5次元』『バラエティ』『オリジナル』『TANO\\*C』『TANO\\*C(オリジナル)』から1つ選んで入力してください。
**アーティスト**
> 楽曲のアーティスト名を指定します。
> アーティスト名を入力してください。
> 大文字・小文字および全角・半角を考慮しない部分一致で検索されます。
**難易度**
> 楽曲の難易度を指定します。EXPERTのみもしくはINFERNOのみの検索をする場合に使用します。
> 指定する場合、『exp』もしくは『inf』と指定してください。
> 指定されないか、不正な値を指定した場合は自動的にEXPERTとINFERNOの両方から検索します。
> レベルが指定されたときのみ機能します。
【注意点】
- ジャンルは1つのみ指定可能です。
【不具合】
- 『オリジナル』を指定すると『TANO\\*C(オリジナル)』も同時に検索されてしまいます。
"""
|
[
"os.path.isfile",
"json.load",
"distutils.util.strtobool"
] |
[((93, 123), 'os.path.isfile', 'os.path.isfile', (['"""setting.json"""'], {}), "('setting.json')\n", (107, 123), False, 'import os\n'), ((206, 218), 'json.load', 'json.load', (['s'], {}), '(s)\n', (215, 218), False, 'import json\n'), ((406, 431), 'distutils.util.strtobool', 'strtobool', (["env['logging']"], {}), "(env['logging'])\n", (415, 431), False, 'from distutils.util import strtobool\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 18:29:44 2019
@author: tgadfort
"""
from debug import debugclass
from playTypes import noplay
from playYards import playyards
#from copy import deepcopy, copy
# create logger
import logging
module_logger = logging.getLogger('log.{0}'.format(__name__))
############################################################################################################
## Drive Class
############################################################################################################
class analyzepenalties:
def __init__(self):
self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
self.ind = 2*" "
self.sep = "======================================================"
self.dc = debugclass()
self.py = playyards()
def isPenaltyAdditive(self, gameData):
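        # Sanity check: penalty yards plus play yards should equal the yard-line change seen on the next play.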
self.logger.debug("\n{0}".format(2*self.sep))
self.logger.debug("{0}Analyzing Penalty Additiveness".format(self.ind))
for idr,driveData in enumerate(gameData):
drivePlays = driveData.plays
for ipl,drivePlay in enumerate(drivePlays):
play = drivePlay.play
if play.penalty.isPenalty is False:
continue
penaltyyards = play.penalty.yards
playyards = play.yds.yards
nextYards = drivePlay.nextDiffYards
if all([penaltyyards, playyards, nextYards]):
if penaltyyards + playyards == nextYards:
continue
elif penaltyyards == playyards and penaltyyards == nextYards:
play.yds.yards = 0
continue
else:
self.logger.debug("{0}Penalty Analysis: Penalty Yards=={1}\tPlay Yards=={2}\tNext Yards=={3}\tText=={4}".format(self.ind, penaltyyards, playyards, nextYards, play.text))
else:
self.logger.debug("{0}Penalty Analysis: Penalty Yards=={1}\tPlay Yards=={2}\tNext Yards=={3}\tText=={4}".format(self.ind, penaltyyards, playyards, nextYards, play.text))
self.logger.debug("{0}Analyzing Penalty Additiveness -> Done".format(self.ind))
def penalties(self, gameData):
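        # Reconcile penalty plays: fill in missing play/penalty yardage from the field-position change before the next play.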
self.logger.debug("\n{0}".format(2*self.sep))
self.logger.debug("{0}Analyzing Penalties".format(self.ind))
for idr,driveData in enumerate(gameData):
drivePlays = driveData.plays
for ipl,drivePlay in enumerate(drivePlays):
play = drivePlay.play
if play.penalty.isPenalty is False:
continue
penaltyyards = self.py.findPenaltyYards(play.text)
nextYards = drivePlay.nextDiffYards
if isinstance(play, noplay):
if play.yds.yards == 0 and penaltyyards is not None:
play.yds.yards = penaltyyards
elif play.yds.yards == 0 and penaltyyards is None:
play.yds.yards = nextYards
else:
if play.yds.yards is None:
play.yds.yards = nextYards
else:
print("Not sure...")
if nextYards == 0 and play.yds.yards == 0:
penaltyyards = 0
if sum([x in play.text for x in ["Personal Foul", "Unsportsmanlike Conduct", "Face Mask"]]) > 0:
if nextYards == 15:
penaltyyards = 15
play.yds.yards = 0
                    elif nextYards == -15:
penaltyyards = -15
play.yds.yards = 0
if nextYards == penaltyyards:
if play.yds.yards == 0:
play.yds.yards = nextYards
play.penalty.yards = penaltyyards
if nextYards == play.yds.yards and nextYards == penaltyyards:
continue
self.logger.debug("{0}Penalty Analysis: Penalty=={1}\tPlay=={2}\tNext=={3}\tYards=={4}\tPYards=={5}\tText=={6}".format(self.ind, play.penalty.isPenalty, play.name, nextYards, play.yds.yards, penaltyyards, play.text))
self.logger.debug("{0}Analyzing Penalties -> Done".format(self.ind))
return gameData
|
[
"debug.debugclass",
"playYards.playyards"
] |
[((834, 846), 'debug.debugclass', 'debugclass', ([], {}), '()\n', (844, 846), False, 'from debug import debugclass\n'), ((874, 885), 'playYards.playyards', 'playyards', ([], {}), '()\n', (883, 885), False, 'from playYards import playyards\n')]
|
import random
import time
from Character import *
from Item import create_item
def battle(fighters, max_turn=10):
"""
Battle process start->loot
:param max_turn: int turns for 1 battle, default 10
:param fighters: list of fighter
:return: None
"""
# Enter battle_process
print('Enter battle_process')
# Begin battle_process
# Init skills (for n)
for fighter in fighters:
fighter.init_battle()
fighter.report_status()
# Turns begin
turn = 1
# Init turns
# fighters_this_turn = list(fighters)
fighters_remain = len(fighters)
while turn <= max_turn and fighters_remain >= 2:
# Enter Turn #turn
print('\n#{t}'.format(t=turn))
# Begin Turn #turn
# Init turn
# Construct fighters participate in this turn
# & Init fighter turn paras
fighters_this_turn = []
for fighter in fighters:
fighter.init_turn()
if fighter.is_alive():
fighters_this_turn.append(fighter)
# toimpr magical nb, in right place ?
# if turn != 1:
# fighter.gain_score(1)
# Choose skill
for fighter in fighters_this_turn:
# NPC choose skill
if fighter.is_npc:
npc = fighter
target = ''
target_list = list(set(fighters_this_turn) - {npc})
key = random.choice(npc.get_available_skills())
skill = npc.BATTLESKILLBOOK[key]
# If it's an A skill, choose its target
if skill.phase_type == 'A':
target = random.choice(target_list)
# Player input skill
else:
player = fighter
target_list = list(set(fighters_this_turn) - {player})
target = '' # dummy?
target_name = '' # dummy?
key = '' # dummy?
while True:
input_raw = input('BoLoBoLo...{s}:'
.format(s=str(player.get_available_skills())))
input_args = input_raw.strip().split()
key = input_args[0].upper()
target_name = input_args[1] if len(input_args) > 1 else ''
if key in player.get_available_skills():
break
skill = player.BATTLESKILLBOOK[key]
# If it's an A skill, choose its target
if skill.phase_type == 'A':
# Auto choose target when only 1 enemy
if len(target_list) == 1:
target = target_list[0]
else:
while True:
for target_fighter in target_list:
if target_fighter.name == target_name:
target = target_fighter
if target:
break
target_name = input('target...')
# Cast skill = record move, create proj, deliver proj
skill.cast(target)
# Billing
for fighter in fighters:
# Start billing by order
for category in ['potion', 'arrow']:
for tag in ['fill', 'drain', 'heal', 'damage']:
prjs = fighter.incoming_projectiles[category][tag]
if prjs:
for prj in prjs:
prj.billing()
# All billed
# Check new death
if fighter.is_alive() and fighter.HP <= 0:
# fighter will die
# fighter did die
# go_die = set alive false, record turn, leave death message
fighter.go_die(turn)
fighters_remain -= 1
# killer gain score for killing
for prj in fighter.lethal_projectiles:
# toImpr move magical nb to global setting
prj.caster.killed_someone()
# Output turn info
# Moves
for fighter in fighters:
print('{hp}hp {mp}mp\t[{f}]|\t{m}'
.format(hp=fighter.HP, mp=fighter.MP,
f=fighter.name, m=str(fighter.last_move)))
time.sleep(0.2)
# Deaths
for fighter in fighters_this_turn:
if not fighter.is_alive():
            print('{f} died'.format(f=fighter.name))
turn += 1
continue
# Exit turns
print('\nExit turns')
# Exit battle_process
# Battle result
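    # Rank fighters by score, using the recorded death turn as a tie-breaker.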
score_board = sorted(fighters, key=lambda f: (f.score, f.died_turn), reverse=True)
for index, fighter in enumerate(score_board):
if fighter.is_alive():
            status = 'alive ({hp}HP)'.format(hp=fighter.HP)
else:
killers = []
for pj in fighter.lethal_projectiles:
                killer = "{owner}'s {skill}". \
format(owner=pj.caster.name, skill=pj.skill.alias)
killers.append(killer)
killers_msg = '&'.join(killers)
            status = 'died (Turn {t}, {hp}HP, killed by {killer})' \
.format(t=fighter.died_turn, hp=fighter.HP,
killer=killers_msg)
print('#{i}\t{f}\t\t*{score}*\t\t{status}'.format(i=index+1, f=fighter.name, score=fighter.score, status=status))
# distribute_loot(player, enemy, battle_result)
# toAdd loot system
# def distribute_loot(player, enemy, battle_result):
# """
# Distribute enemy loot to player according to battle result
# :param player: obj
# :param enemy: obj
# :param battle_result: string 'WIN'/'LOSE'/'TIE'
# :return: None
# """
# if battle_result == 'WIN':
# c_money = 1
# c_exp = 1
# loot_item_dict = enemy.loot['item_dict']
# elif battle_result == 'TIE':
# c_money = 0
# c_exp = 0.5
# loot_item_dict = {}
# elif battle_result == 'LOSE':
# c_money = 0
# c_exp = 0
# loot_item_dict = {}
#
# else:
# return
#
# player.add_money(enemy.loot['money'] * c_money)
# player.add_exp(enemy.loot['EXP'] * c_exp)
# for loot_item in loot_item_dict.keys():
# player.get_item(loot_item, loot_item_dict[loot_item])
# toImpr separated gameplay
if __name__ == "__main__":
glove1 = create_item('A2')
glove2 = create_item('A2')
# toAdd item database
# game_items = [glove1, glove2]
zcx = Fighter('zcx')
zcx.put_in_item(glove1)
zcx.equip_item(glove1)
iii = Fighter('i', is_npc=True)
iii.put_in_item(glove2)
iii.equip_item(glove2)
j = Fighter('j', is_npc=True)
k = Fighter('k', is_npc=True)
allFighters = [zcx, iii, j, k]
battle(allFighters)
# player1.report_wealth()
# print(player1.level)
|
[
"Item.create_item",
"random.choice",
"time.sleep"
] |
[((6694, 6711), 'Item.create_item', 'create_item', (['"""A2"""'], {}), "('A2')\n", (6705, 6711), False, 'from Item import create_item\n'), ((6726, 6743), 'Item.create_item', 'create_item', (['"""A2"""'], {}), "('A2')\n", (6737, 6743), False, 'from Item import create_item\n'), ((4538, 4553), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4548, 4553), False, 'import time\n'), ((1737, 1763), 'random.choice', 'random.choice', (['target_list'], {}), '(target_list)\n', (1750, 1763), False, 'import random\n')]
|
# Generated by Django 2.1.7 on 2019-08-06 16:38
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('galerias', '0002_auto_20190805_1410'),
]
operations = [
migrations.AddField(
model_name='galeriaimagenes',
name='aprobado',
field=models.BooleanField(default=True),
preserve_default=False,
),
migrations.AddField(
model_name='galeriavideos',
name='aprobado',
field=models.BooleanField(default=True),
preserve_default=False,
),
migrations.AddField(
model_name='galeriavideos',
name='descripcion',
field=ckeditor_uploader.fields.RichTextUploadingField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='galeriavideos',
name='portada',
field=sorl.thumbnail.fields.ImageField(blank=True, null=True, upload_to='galerias/', verbose_name='Imagen'),
),
migrations.AlterField(
model_name='galeriaimagenes',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Autor'),
),
migrations.AlterField(
model_name='galeriavideos',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Autor'),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.BooleanField"
] |
[((522, 555), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (541, 555), False, 'from django.db import migrations, models\n'), ((726, 759), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (745, 759), False, 'from django.db import migrations, models\n'), ((1423, 1544), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Autor"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n settings.AUTH_USER_MODEL, verbose_name='Autor')\n", (1440, 1544), False, 'from django.db import migrations, models\n'), ((1674, 1795), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Autor"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n settings.AUTH_USER_MODEL, verbose_name='Autor')\n", (1691, 1795), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains frame widget implementation
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import Qt
from Qt.QtWidgets import QSizePolicy, QFrame
from Qt.QtGui import QPainter, QPainterPath
class WelcomeFrame(QFrame, object):
def __init__(self, pixmap, parent=None):
super(WelcomeFrame, self).__init__(parent)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setFrameShape(QFrame.NoFrame)
self.setFrameShadow(QFrame.Plain)
self._pixmap = pixmap
self.setStyleSheet('QFrame { border-radius: 10px; }')
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
def paintEvent(self, event):
"""
Override base QFrame paintEvent function
:param event: QPaintEvent
"""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
path = QPainterPath()
path.addRoundedRect(0, 0, self.width(), self.height(), 10, 10)
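        # Clip subsequent painting to the rounded rect so the pixmap is drawn with rounded corners.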
painter.setClipPath(path)
painter.drawPixmap(0, 0, self.width(), self.height(), self._pixmap)
|
[
"Qt.QtGui.QPainterPath",
"Qt.QtGui.QPainter"
] |
[((885, 899), 'Qt.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (893, 899), False, 'from Qt.QtGui import QPainter, QPainterPath\n'), ((968, 982), 'Qt.QtGui.QPainterPath', 'QPainterPath', ([], {}), '()\n', (980, 982), False, 'from Qt.QtGui import QPainter, QPainterPath\n')]
|
# treemodels.py
from __future__ import division
import gtk
from debug import *
import numpy as np
import matplotlib.pyplot as plt
class CountingActivitiesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for CountingActivity's in a Log."""
def __init__ (self, log):
gtk.GenericTreeModel.__init__ (self)
self.log = log
@property
def n_rows (self):
return len (self.log.counting_activities)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 2
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.log.counting_activities):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if len (self.log.counting_activities) == 0:
return None
activity = sorted (self.log.counting_activities)[row]
if col == 0:
return activity.name
elif col == 1:
return activity.unit
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class TimingActivitiesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for TimingActivity's in a Log."""
def __init__ (self, log):
gtk.GenericTreeModel.__init__ (self)
self.log = log
@property
def n_rows (self):
return len (self.log.timing_activities)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 1
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.log.timing_activities):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if len (self.log.timing_activities) == 0:
return None
activity = sorted (self.log.timing_activities)[row]
if col == 0:
return activity.name
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class CountingEntriesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for CountingEntry's in a Log."""
def __init__ (self, log, activity_name):
gtk.GenericTreeModel.__init__ (self)
self.log = log
self.activity_name = activity_name
@property
def entries (self):
return self.log.get_entries (self.activity_name)
@property
def n_rows (self):
return len (self.entries)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 3
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.entries):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if self.n_rows == 0:
return None
entry = self.entries[row]
if col == 0:
return str (entry.date)
elif col == 1:
return str (entry.n)
elif col == 2:
return str (entry.error)
elif col == 3:
return str (entry.note)
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class TimingEntriesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for TimingEntry's in a Log."""
def __init__ (self, log, activity_name):
gtk.GenericTreeModel.__init__ (self)
self.log = log
self.activity_name = activity_name
@property
def entries (self):
return self.log.get_entries (self.activity_name)
@property
def n_rows (self):
return len (self.entries)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 2
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.entries):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
def fmt (t):
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}'.format (
t.year, t.month, t.day, t.hour, t.minute)
if self.n_rows == 0:
return None
entry = self.entries[row]
if col == 0:
return fmt (entry.start_time)
elif col == 1:
return fmt (entry.end_time)
elif col == 2:
return str (entry.note)
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class ActivityDrawModel (gtk.GenericTreeModel):
"""Gtk TreeModel for drawing Activity's in a Log."""
def __init__ (self, activities):
gtk.GenericTreeModel.__init__ (self)
self.activities = sorted (activities)
self.checks = [
False for activity in self.activities]
n = len (self.activities)
##mpl_colors = [
## (0.0, 0.0, 1.0),
## (0.0, 0.5, 0.0),
## (1.0, 0.0, 0.0),
## (0.0, 0.75, 0.75),
## (0.75, 0.0, 0.75),
## (0.75, 0.75, 0.0),
## (0.0, 0.0, 0.0),
## (0.0, 0.0, 1.0) ]
##n_color = len (mpl_colors)
##self.colors = [
## gtk.gdk.Color (*mpl_colors[i % n_color]) for i in xrange (n)]
cm = plt.get_cmap ('rainbow')
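        # Give each activity an evenly spaced colour from the rainbow colormap.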
self.colors = [
gtk.gdk.Color (*cm (i / n)[:3])
for i in xrange (n)]
self.color_tuples = [
cm (i / n)[:3]
for i in xrange (n)]
self.alphas = [
int (.8 * 65535) for activity in self.activities]
@property
def n_rows (self):
return len (self.activities)
# toggle
def toggle (self, path):
row = int (path)
self.checks[row] = not self.checks[row]
def toggle_all (self):
if np.sum (self.checks) == len (self.checks):
value = False
else:
value = True
for row in xrange (len (self.checks)):
self.checks[row] = value
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 3
def on_get_column_type (self, index):
if index == 0:
return bool
elif index == 1:
return str
elif index == 2:
return gtk.gdk.Pixbuf
def on_get_iter (self, path):
if self.n_rows:
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if self.n_rows == 0:
return None
activity = sorted (self.activities)[row]
if col == 0:
return self.checks[row]
elif col == 1:
return activity.name
else:
pb = gtk.gdk.Pixbuf (
gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16)
color = self.colors[row]
color_str = '{0:02x}{1:02x}{2:02x}{3:02x}'.format (
*map (int,
(color.red / 256, color.green / 256, color.blue / 256,
self.alphas[row] / 256)))
pb.fill (int (color_str, 16))
return pb
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
|
[
"gtk.GenericTreeModel.__init__",
"gtk.gdk.Pixbuf",
"numpy.sum",
"matplotlib.pyplot.get_cmap"
] |
[((286, 321), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (315, 321), False, 'import gtk\n'), ((1999, 2034), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (2028, 2034), False, 'import gtk\n'), ((3661, 3696), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (3690, 3696), False, 'import gtk\n'), ((5561, 5596), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (5590, 5596), False, 'import gtk\n'), ((7563, 7598), 'gtk.GenericTreeModel.__init__', 'gtk.GenericTreeModel.__init__', (['self'], {}), '(self)\n', (7592, 7598), False, 'import gtk\n'), ((8227, 8250), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (8239, 8250), True, 'import matplotlib.pyplot as plt\n'), ((8763, 8782), 'numpy.sum', 'np.sum', (['self.checks'], {}), '(self.checks)\n', (8769, 8782), True, 'import numpy as np\n'), ((9753, 9808), 'gtk.gdk.Pixbuf', 'gtk.gdk.Pixbuf', (['gtk.gdk.COLORSPACE_RGB', '(True)', '(8)', '(16)', '(16)'], {}), '(gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16)\n', (9767, 9808), False, 'import gtk\n')]
|
import sys
import torch
from torch.autograd import Variable
import numpy as np
import os
from os import path
import argparse
import random
import copy
from tqdm import tqdm
import pickle
from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, \
read_processed_scores, read_scores
from scipy.stats import spearmanr, pearsonr, kendalltau
import math
from torchvision import models
from resources import MODEL_WEIGHT_DIR
from resources import OUTPUTS_DIR
from matplotlib import pyplot as plt
import csv
def parse_split_data(sorted_scores, train_percent, dev_percent, prompt='structure'):
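    # Randomly assign each article (with its per-summary scores for the given prompt) to train/dev/test.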
train = {}
dev = {}
test = {}
all = {}
topic_count = 0
for article_id, scores_list in tqdm(sorted_scores.items()):
entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
entry['sys_summ' + repr(sid)] = [s['scores'][prompt] for s in scores_list if s['summ_id'] == sid][
0] # that can be done more efficiently, but who cares...
rand = random.random()
all[article_id] = entry
if rand < train_percent:
train[article_id] = entry
elif rand < train_percent + dev_percent:
dev[article_id] = entry
else:
test[article_id] = entry
topic_count += 1
print("topics in parse_split_data", topic_count)
return train, dev, test, all
def parse_split_data_balanced(sorted_scores, train_percent, dev_percent, prompt='structure'):
train = {}
dev = {}
test = {}
all = {}
topic_count = 0
article_ids = list(sorted_scores.keys())
random.shuffle(article_ids)
num_articles = len(article_ids)
train_ids = article_ids[0:int(train_percent * num_articles)]
dev_ids = article_ids[int(train_percent * num_articles):int((train_percent + dev_percent) * num_articles)]
# test_ids=article_ids[int((train_percent+dev_percent)*num_articles):]
for article_id, scores_list in tqdm(sorted_scores.items()):
entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
entry['sys_summ' + repr(sid)] = [s['scores'][prompt] for s in scores_list if s['summ_id'] == sid][
0] # that can be done more efficiently, but who cares...
# rand = random.random()
all[article_id] = entry
if article_id in train_ids:
train[article_id] = entry
elif article_id in dev_ids:
dev[article_id] = entry
else:
test[article_id] = entry
topic_count += 1
print("topics in parse_split_data", topic_count)
return train, dev, test, all
def build_model(model_type, vec_length, learn_rate=None):
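    # Either a single linear layer or a small two-layer MLP mapping the input vector to a scalar reward.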
if 'linear' in model_type:
deep_model = torch.nn.Sequential(
torch.nn.Linear(vec_length, 1),
)
else:
deep_model = torch.nn.Sequential(
torch.nn.Linear(vec_length, int(vec_length / 2)),
torch.nn.ReLU(),
torch.nn.Linear(int(vec_length / 2), 1),
)
if learn_rate is not None:
optimiser = torch.optim.Adam(deep_model.parameters(), lr=learn_rate)
return deep_model, optimiser
else:
return deep_model
def deep_pair_train(vec_list, target, deep_model, optimiser, device):
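    # One training step: score both summaries in each pair, softmax over the pair, and fit the preference target with BCE.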
# print(np.array(vec_list).shape)
input = Variable(torch.from_numpy(np.array(vec_list)).float())
# print(input)
if 'gpu' in device:
input = input.to('cuda')
value_variables = deep_model(input)
# print(value_variables)
softmax_layer = torch.nn.Softmax(dim=1)
pred = softmax_layer(value_variables)
# print(pred)
# print(np.array(target).shape, np.array(target).reshape(-1, 2, 1).shape)
target_variables = Variable(torch.from_numpy(np.array(target)).float()).view(-1, 2, 1)
# print(target_variables)
if 'gpu' in device:
target_variables = target_variables.to('cuda')
loss_fn = torch.nn.BCELoss()
loss = loss_fn(pred, target_variables)
# print(loss)
optimiser.zero_grad()
loss.backward()
optimiser.step()
return loss.cpu().item()
def deep_pair_train_loss_only(vec_list, target, deep_model, optimiser, device):
# print(np.array(vec_list).shape)
input = Variable(torch.from_numpy(np.array(vec_list)).float())
# print(input)
if 'gpu' in device:
input = input.to('cuda')
value_variables = deep_model(input)
# print(value_variables)
softmax_layer = torch.nn.Softmax(dim=1)
pred = softmax_layer(value_variables)
# print(pred)
# print(np.array(target).shape, np.array(target).reshape(-1, 2, 1).shape)
target_variables = Variable(torch.from_numpy(np.array(target)).float()).view(-1, 2, 1)
# print(target_variables)
if 'gpu' in device:
target_variables = target_variables.to('cuda')
loss_fn = torch.nn.BCELoss()
loss = loss_fn(pred, target_variables)
# print(loss)
return loss.cpu().item()
def build_pairs(entries):
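    # Convert per-summary scores into ordered summary pairs with preference labels ([1, 0], [0, 1], or [0.5, 0.5] for ties).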
pair_list = []
topic_count = 0
summ_count = 0
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
# really iterate over all pairs. there was an error here before since j started from 1, to prevent i,j=0,0. but this also lead to i,j=x,0 never be chosen the situation i=j is solved otherwise
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
if i == j: continue
if entry[summ_ids[i]] > entry[summ_ids[j]]:
pref = [1, 0]
elif entry[summ_ids[i]] < entry[summ_ids[j]]:
pref = [0, 1]
else:
pref = [0.5, 0.5]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
# print(pair_list)
topic_count += 1
summ_count = summ_count + len(summ_ids)
print("topics", topic_count)
print("summ", summ_count)
return pair_list
def build_anno_pairs(entries, pair_anno_scores):
pair_list = []
topic_count = 0
summ_count = 0
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
# really iterate over all pairs. there was an error here before since j started from 1, to prevent i,j=0,0. but this also lead to i,j=x,0 never be chosen the situation i=j is solved otherwise
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
if i == j: continue
# get keys from dictionary
entry_keys = list(entry.keys())
# get pair preference from pair_anno_scores
for pair in pair_anno_scores[article_id]:
if pair['summ_id_i'] == int(entry_keys[i][8]) and pair['summ_id_j'] == int(entry_keys[j][8]):
if pair['pref'] == 1:
pref = [1, 0]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
else:
pref = [0, 1]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
elif pair['summ_id_j'] == int(entry_keys[i][8]) and pair['summ_id_i'] == int(entry_keys[j][8]):
if pair['pref'] == 1:
pref = [0, 1]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
else:
pref = [1, 0]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
topic_count += 1
summ_count = summ_count + len(summ_ids)
print("topics", topic_count)
print("summ", summ_count)
# print(pair_list)
return pair_list
def build_human_pair_scores(pair_list):
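    # Count, per article, how many pairwise comparisons each summary wins.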
human_pair_scores = {}
for entry in pair_list:
article_id = str(entry[0])
sum_id_i = str(entry[1])
sum_id_j = str(entry[2])
pref = entry[3]
summ_entry = {}
if article_id in human_pair_scores:
if pref == [1, 0]:
if sum_id_i in human_pair_scores[article_id]:
                    human_pair_scores[article_id][sum_id_i] += 1
else:
human_pair_scores[article_id][sum_id_i] = 1
else:
if sum_id_j in human_pair_scores[article_id]:
                    human_pair_scores[article_id][sum_id_j] += 1
else:
human_pair_scores[article_id][sum_id_j] = 1
else:
if pref == [1, 0]:
summ_entry[sum_id_i] = 1
summ_entry[sum_id_j] = 0
else:
summ_entry[sum_id_i] = 0
summ_entry[sum_id_j] = 1
human_pair_scores[article_id] = summ_entry
return human_pair_scores
# randomize_pref_order and double_prefs are only relevant if the learning function learns f(s0,s1)=pref. in our case, we learn f(s0)=pref[0] and f(s1)=pref[1], so this should be set to False
def build_pairs_majority_preferences(entries, sorted_scores, target_type='graded', ignore_ties=False,
randomize_pref_order=False, double_prefs=False):
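    # Deduplicate summaries with identical text and accumulate preference counts per pair before converting to a graded or binary target.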
pair_list = []
topic_count = 0
anno_count = 0
summ_count = 0
entries_text = {}
# get summary text and matching id
for article_id, scores_list in tqdm(sorted_scores.items()):
temp_entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
# get summary text
s_text = [s['sys_summ'] for s in scores_list if s['summ_id'] == sid][0]
temp_entry['sys_summ' + repr(sid)] = s_text
# save in dictionary
entries_text[article_id] = temp_entry
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
# mapping from summary text to last summary id with that text. that's the one we will use
summ2id = {entries_text[article_id][summ_id]: summ_id for summ_id in summ_ids}
# put here the prefs for this article
article_prefs = {}
# still run through all pairs
# really iterate over all pairs. there was an error here before since j started from 1, to prevent i,j=0,0. but this also lead to i,j=x,0 never be chosen the situation i=j is solved otherwise
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
# run through dictionary containing summ_ids and matching text
# for key, value in entries_text[article_id].items():
# get text for current summaries i and j
# if key == summ_ids[i]:
# text_i = value
# elif key == summ_ids[j]:
# text_j = value
text_i = entries_text[article_id][summ_ids[i]]
text_j = entries_text[article_id][summ_ids[j]]
# check if text is identical, if yes skip
if i == j or text_i == text_j:
# print("DUPLICATE FOUND: TEXT i", text_i, "TEXT j", text_i)
continue
# get the unique summ ids
unique_summ_id_pair = [summ2id[text_i], summ2id[text_j]]
# some debug output
# noinspection PyUnreachableCode
if False:
print("%s vs. %s (IDs %s vs. %s)" % (
summ_ids[i], summ_ids[j], unique_summ_id_pair[0], unique_summ_id_pair[1]))
full_entry = sorted_scores[article_id]
print(" system %s with score %s (%s) vs." % (
full_entry[i]['sys_name'], full_entry[i]['scores']['redundancy'], entry[summ_ids[i]]))
print(" system %s with score %s (%s)" % (
full_entry[j]['sys_name'], full_entry[j]['scores']['redundancy'], entry[summ_ids[j]]))
print(
" \"%s...\" vs. \"%s...\"" % (full_entry[i]['sys_summ'][:20], full_entry[j]['sys_summ'][:20]))
# unique_summ_id_pair.sort()
if entry[summ_ids[i]] > entry[summ_ids[j]]:
pref = [1, 0]
elif entry[summ_ids[i]] < entry[summ_ids[j]]:
pref = [0, 1]
else:
pref = [0.5, 0.5]
# if entry[unique_summ_id_pair[0]] > entry[unique_summ_id_pair[1]]:
# pref = [1, 0]
# elif entry[unique_summ_id_pair[0]] > entry[unique_summ_id_pair[1]]:
# pref = [0, 1]
# else:
# # todo we could completely ignore ties. doesnt change much. low prio
# pref = [0.5, 0.5]
# sort the ids so that we get a unique key, so that (sys_summ0,sys_summ1) and (sys_summ1,sys_summ0) are the same
if unique_summ_id_pair[1] < unique_summ_id_pair[0]:
unique_summ_id_pair = unique_summ_id_pair[::-1]
pref = pref[::-1]
# convert to tuple, otherwise its not hashable for the dict
unique_summ_id_pair = tuple(unique_summ_id_pair)
# add up the pref to the total pref vector of the specific summary pair. create a new entry if not existing
article_prefs[unique_summ_id_pair] = article_prefs.get(unique_summ_id_pair,
np.array([0, 0])) + np.array(pref)
# transform to target
for unique_summ_id_pair, pref in article_prefs.items():
# depending on the mode, use binary target, or graded one
pref = (pref / (pref[0] + pref[1])).tolist()
if target_type == 'binary':
if pref[0] > pref[1]:
pref = [1, 0]
elif pref[0] < pref[1]:
                    pref = [0, 1]
else:
pref = [0.5, 0.5]
# skip if it is a tie and you want to ignore ties
if pref[0] != 0.5 or not ignore_ties:
# include the pref two times, once in one direction and once in the other direction
if double_prefs:
pair_list.append((article_id, unique_summ_id_pair[1], unique_summ_id_pair[0], pref[::-1]))
pair_list.append((article_id, unique_summ_id_pair[0], unique_summ_id_pair[1], pref))
else:
# include the pref in the reverse order by chance. this might be necessary if there is a bias in the distribution of the score, e.g. if they are ordered
if randomize_pref_order and bool(random.getrandbits(1)):
pair_list.append((article_id, unique_summ_id_pair[1], unique_summ_id_pair[0], pref[::-1]))
else:
pair_list.append((article_id, unique_summ_id_pair[0], unique_summ_id_pair[1], pref))
topic_count += 1
anno_count += len(summ_ids)
summ_count += len(summ2id)
print("topics", topic_count)
print("annotations", anno_count)
print("summ", summ_count)
print("summ pairs", len(pair_list))
return pair_list
def build_pair_vecs(vecs, pairs):
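    # For each preference pair, concatenate the article vector with each summary vector to form the two model inputs.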
pair_vec_list = []
for aid, sid1, sid2, _ in pairs:
article_vec = list(vecs[aid]['article'])
s1_vec = list(vecs[aid][sid1])
s2_vec = list(vecs[aid][sid2])
pair_vec_list.append([article_vec + s1_vec, article_vec + s2_vec])
return pair_vec_list
def pair_train_rewarder(vec_dic, pairs, deep_model, optimiser, loss_only, batch_size=32, device='cpu'):
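    # Shuffle the preference pairs, build (article + summary) vector pairs and process them batch by batch; with loss_only=True the model is only evaluated, otherwise it is trained. Returns the mean batch loss.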
loss_list = []
shuffled_pairs = pairs[:]
np.random.shuffle(shuffled_pairs)
vec_pairs = build_pair_vecs(vec_dic, shuffled_pairs)
# print('total number of pairs built: {}'.format(len(vec_pairs)))
for pointer in range(int((len(
            pairs) - 1) / batch_size) + 1):  # there was a bug here: when len(pairs) was a multiple of 32, the last batch was [] and caused an exception
vec_batch = vec_pairs[pointer * batch_size:(pointer + 1) * batch_size]
target_batch = shuffled_pairs[pointer * batch_size:(pointer + 1) * batch_size]
target_batch = [ee[-1] for ee in target_batch]
if loss_only:
loss = deep_pair_train_loss_only(vec_batch, target_batch, deep_model, optimiser, device)
else:
loss = deep_pair_train(vec_batch, target_batch, deep_model, optimiser, device)
loss_list.append(loss)
return np.mean(loss_list)
def test_rewarder(vec_list, human_scores, model, device, plot_file=None):
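    # Score every summary with the model and compare against the human scores: per-topic and global Spearman/Pearson/Kendall correlations; optionally writes a violin plot of predicted vs. true scores.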
results = {'rho': [], 'rho_p': [], 'pcc': [], 'pcc_p': [], 'tau': [], 'tau_p': [], 'rho_global': [],
'pcc_global': [], 'tau_global': []}
true_scores_all = []
pred_scores_all = np.array([])
# print(human_scores)
# pred_scores_all = []
for article_id in human_scores:
entry = human_scores[article_id]
summ_ids = list(entry.keys())
if len(summ_ids) < 2: continue
concat_vecs = []
true_scores = []
for i in range(len(summ_ids)):
article_vec = list(vec_list[article_id]['article'])
summ_vec = list(vec_list[article_id][summ_ids[i]])
# print(np.array(concat_vecs).shape, np.array(article_vec).shape, np.array(summ_vec).shape)
concat_vecs.append(article_vec + summ_vec)
# print(np.array(concat_vecs).shape)
# print(entry[summ_ids[i]])
true_scores.append(entry[summ_ids[i]])
true_scores_all += true_scores # add scores for topic to list of all scores
input = Variable(torch.from_numpy(np.array(concat_vecs)).float())
if 'gpu' in device:
input = input.to('cuda')
model.eval()
with torch.no_grad():
# print(true_scores)
# print(np.array(true_scores).shape)
# print(input)
# print(input.shape)
# print(model(input).data.cpu().numpy())
# print(model(input).data.cpu().numpy().shape)
pred_scores = model(input).data.cpu().numpy().reshape(1, -1)[0]
pred_scores_all = np.concatenate((pred_scores_all, pred_scores), axis=0)
# pred_scores_all += pred_scores.tolist()
rho, rho_p = spearmanr(true_scores, pred_scores)
pcc, pcc_p = pearsonr(true_scores, pred_scores)
tau, tau_p = kendalltau(true_scores, pred_scores)
if not (math.isnan(rho) or math.isnan(pcc) or math.isnan(tau)):
results['rho'].append(rho)
results['rho_p'].append(rho_p)
results['pcc'].append(pcc)
results['pcc_p'].append(pcc_p)
results['tau'].append(tau)
results['tau_p'].append(tau_p)
rho = spearmanr(true_scores_all, pred_scores_all)[0]
pcc = pearsonr(true_scores_all, pred_scores_all)[0]
tau = kendalltau(true_scores_all, pred_scores_all)[0]
if not (math.isnan(rho) or math.isnan(pcc) or math.isnan(tau)):
results['rho_global'].append(rho)
results['pcc_global'].append(pcc)
results['tau_global'].append(tau)
if plot_file is not None:
fig, ax = plt.subplots()
# true_scores_all=np.array(true_scores_all)
# pred_scores_all=np.array(pred_scores_all)
unique = np.sort(np.unique(true_scores_all))
data_to_plot = [pred_scores_all[true_score == true_scores_all] for true_score in unique]
# bw_methods determines how soft the distribution curve will be. lower values are more sharp
ax.violinplot(data_to_plot, showmeans=True, showmedians=True, bw_method=0.2)
ax.scatter(true_scores_all + np.random.normal(0, 0.1, pred_scores_all.shape[0]), pred_scores_all, marker=".",
s=3, alpha=0.5)
ax.set_title('Comparison and distributions of true values to predicted score')
ax.set_xlabel('true scores')
ax.set_ylabel('predicted scores')
xticklabels = true_scores_all
ax.set_xticks(true_scores_all)
print("violin plot written to: %s" % plot_file)
plt.savefig(plot_file)
return results
def parse_args(argv):
ap = argparse.ArgumentParser("arguments for summary sampler")
ap.add_argument('-e', '--epoch_num', type=int, default=50)
ap.add_argument('-b', '--batch_size', type=int, default=32)
ap.add_argument('-tt', '--train_type', type=str, help='pairwise or regression', default='pairwise')
ap.add_argument('-tp', '--train_percent', type=float, help='how many data used for training', default=.64)
ap.add_argument('-dp', '--dev_percent', type=float, help='how many data used for dev', default=.16)
ap.add_argument('-lr', '--learn_rate', type=float, help='learning rate', default=3e-4)
ap.add_argument('-mt', '--model_type', type=str, help='deep/linear', default='linear')
ap.add_argument('-dv', '--device', type=str, help='cpu/gpu', default='gpu')
ap.add_argument('-se', '--seed', type=int, help='random seed number', default='1')
ap.add_argument('-fn', '--file_name', type=str, help='file name for csv output',
default='BetterRewardsStatistics_test.csv')
args = ap.parse_args(argv)
return args.epoch_num, args.batch_size, args.train_type, args.train_percent, args.dev_percent, args.learn_rate, args.model_type, args.device, args.seed, args.file_name
def main(argv):
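    # End-to-end pipeline: parse arguments, seed the RNGs, build the model, split the data, build preference pairs, train/evaluate for epoch_num epochs while logging to CSV, then reload the best epoch (by dev PCC) and save its weights.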
epoch_num, batch_size, train_type, train_percent, dev_percent, learn_rate, model_type, device, seed, file_name = parse_args(
argv[1:])
print('\n=====Arguments====')
print('epoch num {}'.format(epoch_num))
print('batch size {}'.format(batch_size))
print('train type {}'.format(train_type))
print('train percent {}'.format(train_percent))
print('dev percent {}'.format(dev_percent))
print('learn rate {}'.format(learn_rate))
print('model type {}'.format(model_type))
print('device {}'.format(device))
print('seed {}'.format(seed))
print('file name {}'.format(file_name))
print('=====Arguments====\n')
csv_column_names = ['seed', 'learn_rate', 'model_type', 'train_pairs', 'dev_pairs', 'test_pairs', 'epoch_num',
'loss_train', 'loss_dev', 'loss_test', 'rho_train', 'rho_p_train', 'pcc_train', 'pcc_p_train',
'tau_train', 'tau_p_train', 'rho_train_global', 'pcc_train_global', 'tau_train_global',
'rho_dev', 'rho_p_dev', 'pcc_dev', 'pcc_p_dev', 'tau_dev', 'tau_p_dev',
'rho_dev_global', 'pcc_dev_global', 'tau_dev_global', 'rho_test', 'rho_p_test', 'pcc_test',
'pcc_p_test', 'tau_test', 'tau_p_test', 'rho_test_global', 'pcc_test_global', 'tau_test_global']
# check if csv_file exists
if path.exists(file_name):
csv_exists = True
else:
csv_exists = False
with open(file_name, 'a', newline='') as csv_file:
writer = csv.writer(csv_file)
# if a new csv_file is generated, write column names
if csv_exists is False:
writer.writerow(csv_column_names)
np.random.seed(seed=seed)
random.seed(seed)
torch.random.manual_seed(seed)
torch.manual_seed(seed)
if train_percent + dev_percent >= 1.:
print('ERROR! Train data percentage plus dev data percentage is {}! Make sure the sum is below 1.0!'.format(
train_percent + dev_percent))
exit(1)
BERT_VEC_LENGTH = 1024 # change this to 768 if you use bert-base
deep_model, optimiser = build_model(model_type, BERT_VEC_LENGTH * 2, learn_rate)
if 'gpu' in device:
deep_model.to('cuda')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# read human scores and vectors for summaries/docs, and split the train/dev/test set
sorted_scores = read_sorted_scores()
# read pair anno scores
pair_anno_scores = read_pair_anno_scores()
# train, dev, test, all = parse_split_data(sorted_scores, train_percent, dev_percent)
train, dev, test, all = parse_split_data_balanced(sorted_scores, train_percent, dev_percent)
# without majority preferences
# train_pairs = build_pairs(train)
# dev_pairs = build_pairs(dev)
# test_pairs = build_pairs(test)
# without majority preferences but with pair anno
train_pairs = build_anno_pairs(train, pair_anno_scores)
dev_pairs = build_anno_pairs(dev, pair_anno_scores)
test_pairs = build_anno_pairs(test, pair_anno_scores)
# with majority preferences
# train_pairs = build_pairs_majority_preferences(train, sorted_scores)
# dev_pairs = build_pairs_majority_preferences(dev, sorted_scores)
# test_pairs = build_pairs_majority_preferences(test, sorted_scores)
# with majority preferences and pair anno
# train_pairs = build_anno_pairs_majority_preferences(train, sorted_scores, pair_anno_scores)
# dev_pairs = build_anno_pairs_majority_preferences(dev, sorted_scores, pair_anno_scores)
# test_pairs = build_anno_pairs_majority_preferences(test, sorted_scores, pair_anno_scores)
# build human pair scores for pairs
train_anno = build_human_pair_scores(train_pairs)
dev_anno = build_human_pair_scores(dev_pairs)
test_anno = build_human_pair_scores(test_pairs)
print(len(train_pairs), len(dev_pairs), len(test_pairs))
# read bert vectors
with open('data/doc_summ_bert_vectors.pkl', 'rb') as ff:
all_vec_dic = pickle.load(ff)
pcc_list = []
weights_list = []
for ii in range(epoch_num + 1):
print('\n=====EPOCH {}====='.format(ii))
if ii == 0:
# do not train in epoch 0, just evaluate the performance of the randomly initialized model (sanity check and baseline)
loss_train = pair_train_rewarder(all_vec_dic, train_pairs, deep_model, optimiser, True, batch_size,
device)
else:
# from epoch 1 on, receive the data and learn from it. the loss is still the loss before fed with the training examples
loss_train = pair_train_rewarder(all_vec_dic, train_pairs, deep_model, optimiser, False, batch_size,
device)
loss_dev = pair_train_rewarder(all_vec_dic, dev_pairs, deep_model, optimiser, True, batch_size, device)
loss_test = pair_train_rewarder(all_vec_dic, test_pairs, deep_model, optimiser, True, batch_size, device)
csv_row = [seed, learn_rate, model_type, len(train_pairs), len(dev_pairs), len(test_pairs), ii, loss_train,
loss_dev, loss_test]
print('--> losses (train,dev,test)', loss_train, loss_dev, loss_test)
# Train-Data only
print("==Train==")
# results_train = test_rewarder(all_vec_dic, train, deep_model, device)
results_train = test_rewarder(all_vec_dic, train_anno, deep_model, device)
for metric in results_train:
print('{}\t{}'.format(metric, np.mean(results_train[metric])))
csv_row.append(np.mean(results_train[metric]))
print("==Dev==")
# results = test_rewarder(all_vec_dic, dev, deep_model, device)
results = test_rewarder(all_vec_dic, dev_anno, deep_model, device)
for metric in results:
print('{}\t{}'.format(metric, np.mean(results[metric])))
csv_row.append(np.mean(results[metric]))
# Test-Data only
print("==Test==")
# results_test = test_rewarder(all_vec_dic, test, deep_model, device)
results_test = test_rewarder(all_vec_dic, test_anno, deep_model, device)
for metric in results_test:
print('{}\t{}'.format(metric, np.mean(results_test[metric])))
csv_row.append(np.mean(results_test[metric]))
writer.writerow(csv_row)
pcc_list.append(np.mean(results['pcc']))
weights_list.append(copy.deepcopy(deep_model.state_dict()))
idx = np.argmax(pcc_list)
best_result = pcc_list[idx]
print('\n======Best results come from epoch no. {}====='.format(idx))
deep_model.load_state_dict(weights_list[idx])
output_pattern = 'batch{}_{}_trainPercent{}_seed{}_lrate{}_{}_epoch{}'.format(
batch_size, train_type, train_percent, seed, learn_rate, model_type, epoch_num
)
test_results = test_rewarder(all_vec_dic, test, deep_model, device,
os.path.join(OUTPUTS_DIR, output_pattern + '_onTest.pdf'))
test_rewarder(all_vec_dic, train, deep_model, device,
os.path.join(OUTPUTS_DIR, output_pattern + '_onTrain.pdf'))
test_rewarder(all_vec_dic, dev, deep_model, device, os.path.join(OUTPUTS_DIR, output_pattern + '_onDev.pdf'))
print('Its performance on the test set is:')
for metric in test_results:
print('{}\t{}'.format(metric, np.mean(test_results[metric])))
model_weight_name = 'pcc{0:.4f}_'.format(np.mean(test_results['pcc']))
model_weight_name += 'seed{}_epoch{}_batch{}_{}_trainPercent{}_lrate{}_{}.model'.format(
seed, epoch_num, batch_size, train_type, train_percent, learn_rate, model_type
)
torch.save(weights_list[idx], os.path.join(MODEL_WEIGHT_DIR, model_weight_name))
print('\nbest model weight saved to: {}'.format(os.path.join(MODEL_WEIGHT_DIR, model_weight_name)))
if __name__ == '__main__':
main(sys.argv)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.argmax",
"random.shuffle",
"numpy.mean",
"torch.nn.Softmax",
"pickle.load",
"numpy.random.normal",
"scipy.stats.kendalltau",
"torch.no_grad",
"os.path.join",
"numpy.unique",
"torch.nn.BCELoss",
"os.path.exists",
"random.seed",
"torch.nn.Linear",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle",
"math.isnan",
"scorer.data_helper.json_reader.read_pair_anno_scores",
"csv.writer",
"torch.random.manual_seed",
"torch.manual_seed",
"scipy.stats.pearsonr",
"random.random",
"numpy.concatenate",
"torch.nn.ReLU",
"scipy.stats.spearmanr",
"scorer.data_helper.json_reader.read_sorted_scores",
"numpy.array",
"random.getrandbits",
"matplotlib.pyplot.savefig"
] |
[((1669, 1696), 'random.shuffle', 'random.shuffle', (['article_ids'], {}), '(article_ids)\n', (1683, 1696), False, 'import random\n'), ((3637, 3660), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3653, 3660), False, 'import torch\n'), ((4015, 4033), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (4031, 4033), False, 'import torch\n'), ((4545, 4568), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (4561, 4568), False, 'import torch\n'), ((4923, 4941), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (4939, 4941), False, 'import torch\n'), ((16061, 16094), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_pairs'], {}), '(shuffled_pairs)\n', (16078, 16094), True, 'import numpy as np\n'), ((16916, 16934), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (16923, 16934), True, 'import numpy as np\n'), ((17214, 17226), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17222, 17226), True, 'import numpy as np\n'), ((20592, 20648), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""arguments for summary sampler"""'], {}), "('arguments for summary sampler')\n", (20615, 20648), False, 'import argparse\n'), ((23194, 23216), 'os.path.exists', 'path.exists', (['file_name'], {}), '(file_name)\n', (23205, 23216), False, 'from os import path\n'), ((1080, 1095), 'random.random', 'random.random', ([], {}), '()\n', (1093, 1095), False, 'import random\n'), ((18715, 18750), 'scipy.stats.spearmanr', 'spearmanr', (['true_scores', 'pred_scores'], {}), '(true_scores, pred_scores)\n', (18724, 18750), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((18772, 18806), 'scipy.stats.pearsonr', 'pearsonr', (['true_scores', 'pred_scores'], {}), '(true_scores, pred_scores)\n', (18780, 18806), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((18828, 18864), 'scipy.stats.kendalltau', 'kendalltau', (['true_scores', 'pred_scores'], {}), '(true_scores, pred_scores)\n', (18838, 18864), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19193, 19236), 'scipy.stats.spearmanr', 'spearmanr', (['true_scores_all', 'pred_scores_all'], {}), '(true_scores_all, pred_scores_all)\n', (19202, 19236), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19250, 19292), 'scipy.stats.pearsonr', 'pearsonr', (['true_scores_all', 'pred_scores_all'], {}), '(true_scores_all, pred_scores_all)\n', (19258, 19292), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19306, 19350), 'scipy.stats.kendalltau', 'kendalltau', (['true_scores_all', 'pred_scores_all'], {}), '(true_scores_all, pred_scores_all)\n', (19316, 19350), False, 'from scipy.stats import spearmanr, pearsonr, kendalltau\n'), ((19597, 19611), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19609, 19611), True, 'from matplotlib import pyplot as plt\n'), ((20516, 20538), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_file'], {}), '(plot_file)\n', (20527, 20538), True, 'from matplotlib import pyplot as plt\n'), ((23354, 23374), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (23364, 23374), False, 'import csv\n'), ((23523, 23548), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (23537, 23548), True, 'import numpy as np\n'), ((23557, 23574), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (23568, 23574), False, 'import random\n'), ((23583, 23613), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seed'], 
{}), '(seed)\n', (23607, 23613), False, 'import torch\n'), ((23622, 23645), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (23639, 23645), False, 'import torch\n'), ((24329, 24349), 'scorer.data_helper.json_reader.read_sorted_scores', 'read_sorted_scores', ([], {}), '()\n', (24347, 24349), False, 'from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, read_processed_scores, read_scores\n'), ((24411, 24434), 'scorer.data_helper.json_reader.read_pair_anno_scores', 'read_pair_anno_scores', ([], {}), '()\n', (24432, 24434), False, 'from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, read_processed_scores, read_scores\n'), ((28711, 28730), 'numpy.argmax', 'np.argmax', (['pcc_list'], {}), '(pcc_list)\n', (28720, 28730), True, 'import numpy as np\n'), ((2866, 2896), 'torch.nn.Linear', 'torch.nn.Linear', (['vec_length', '(1)'], {}), '(vec_length, 1)\n', (2881, 2896), False, 'import torch\n'), ((3034, 3049), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (3047, 3049), False, 'import torch\n'), ((18207, 18222), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18220, 18222), False, 'import torch\n'), ((18584, 18638), 'numpy.concatenate', 'np.concatenate', (['(pred_scores_all, pred_scores)'], {'axis': '(0)'}), '((pred_scores_all, pred_scores), axis=0)\n', (18598, 18638), True, 'import numpy as np\n'), ((19366, 19381), 'math.isnan', 'math.isnan', (['rho'], {}), '(rho)\n', (19376, 19381), False, 'import math\n'), ((19385, 19400), 'math.isnan', 'math.isnan', (['pcc'], {}), '(pcc)\n', (19395, 19400), False, 'import math\n'), ((19404, 19419), 'math.isnan', 'math.isnan', (['tau'], {}), '(tau)\n', (19414, 19419), False, 'import math\n'), ((19743, 19769), 'numpy.unique', 'np.unique', (['true_scores_all'], {}), '(true_scores_all)\n', (19752, 19769), True, 'import numpy as np\n'), ((26057, 26072), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (26068, 26072), False, 'import pickle\n'), ((29201, 29258), 'os.path.join', 'os.path.join', (['OUTPUTS_DIR', "(output_pattern + '_onTest.pdf')"], {}), "(OUTPUTS_DIR, output_pattern + '_onTest.pdf')\n", (29213, 29258), False, 'import os\n'), ((29344, 29402), 'os.path.join', 'os.path.join', (['OUTPUTS_DIR', "(output_pattern + '_onTrain.pdf')"], {}), "(OUTPUTS_DIR, output_pattern + '_onTrain.pdf')\n", (29356, 29402), False, 'import os\n'), ((29464, 29520), 'os.path.join', 'os.path.join', (['OUTPUTS_DIR', "(output_pattern + '_onDev.pdf')"], {}), "(OUTPUTS_DIR, output_pattern + '_onDev.pdf')\n", (29476, 29520), False, 'import os\n'), ((29734, 29762), 'numpy.mean', 'np.mean', (["test_results['pcc']"], {}), "(test_results['pcc'])\n", (29741, 29762), True, 'import numpy as np\n'), ((30001, 30050), 'os.path.join', 'os.path.join', (['MODEL_WEIGHT_DIR', 'model_weight_name'], {}), '(MODEL_WEIGHT_DIR, model_weight_name)\n', (30013, 30050), False, 'import os\n'), ((18881, 18896), 'math.isnan', 'math.isnan', (['rho'], {}), '(rho)\n', (18891, 18896), False, 'import math\n'), ((18900, 18915), 'math.isnan', 'math.isnan', (['pcc'], {}), '(pcc)\n', (18910, 18915), False, 'import math\n'), ((18919, 18934), 'math.isnan', 'math.isnan', (['tau'], {}), '(tau)\n', (18929, 18934), False, 'import math\n'), ((20092, 20142), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', 'pred_scores_all.shape[0]'], {}), '(0, 0.1, pred_scores_all.shape[0])\n', (20108, 20142), True, 'import numpy as np\n'), ((28599, 28622), 'numpy.mean', 'np.mean', (["results['pcc']"], {}), 
"(results['pcc'])\n", (28606, 28622), True, 'import numpy as np\n'), ((30108, 30157), 'os.path.join', 'os.path.join', (['MODEL_WEIGHT_DIR', 'model_weight_name'], {}), '(MODEL_WEIGHT_DIR, model_weight_name)\n', (30120, 30157), False, 'import os\n'), ((3443, 3461), 'numpy.array', 'np.array', (['vec_list'], {}), '(vec_list)\n', (3451, 3461), True, 'import numpy as np\n'), ((4351, 4369), 'numpy.array', 'np.array', (['vec_list'], {}), '(vec_list)\n', (4359, 4369), True, 'import numpy as np\n'), ((13857, 13871), 'numpy.array', 'np.array', (['pref'], {}), '(pref)\n', (13865, 13871), True, 'import numpy as np\n'), ((27744, 27774), 'numpy.mean', 'np.mean', (['results_train[metric]'], {}), '(results_train[metric])\n', (27751, 27774), True, 'import numpy as np\n'), ((28100, 28124), 'numpy.mean', 'np.mean', (['results[metric]'], {}), '(results[metric])\n', (28107, 28124), True, 'import numpy as np\n'), ((28502, 28531), 'numpy.mean', 'np.mean', (['results_test[metric]'], {}), '(results_test[metric])\n', (28509, 28531), True, 'import numpy as np\n'), ((29653, 29682), 'numpy.mean', 'np.mean', (['test_results[metric]'], {}), '(test_results[metric])\n', (29660, 29682), True, 'import numpy as np\n'), ((13837, 13853), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (13845, 13853), True, 'import numpy as np\n'), ((18076, 18097), 'numpy.array', 'np.array', (['concat_vecs'], {}), '(concat_vecs)\n', (18084, 18097), True, 'import numpy as np\n'), ((27680, 27710), 'numpy.mean', 'np.mean', (['results_train[metric]'], {}), '(results_train[metric])\n', (27687, 27710), True, 'import numpy as np\n'), ((28042, 28066), 'numpy.mean', 'np.mean', (['results[metric]'], {}), '(results[metric])\n', (28049, 28066), True, 'import numpy as np\n'), ((28439, 28468), 'numpy.mean', 'np.mean', (['results_test[metric]'], {}), '(results_test[metric])\n', (28446, 28468), True, 'import numpy as np\n'), ((3848, 3864), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (3856, 3864), True, 'import numpy as np\n'), ((4756, 4772), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (4764, 4772), True, 'import numpy as np\n'), ((15048, 15069), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (15066, 15069), False, 'import random\n')]
|
import argparse
parser = argparse.ArgumentParser(prog="connvis",description='Web-based conntrack that tries to simplify the data for privacy research')
parser.add_argument('--nodnsseed', help='do not seed domains from dnsmasq history',action='store_true')
parser.add_argument('--shell', help='Enable interactive shell',action='store_true')
args = parser.parse_args()
import ipaddress
homenetwork = ipaddress.ip_network('192.168.0.0/24')
homenetwork_router = ipaddress.ip_address('192.168.0.1')
aggregate_google=True # That is a lot of domains
ignored_domains=["osoite.local"]
|
[
"ipaddress.ip_address",
"ipaddress.ip_network",
"argparse.ArgumentParser"
] |
[((25, 157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""connvis"""', 'description': '"""Web-based conntrack that tries to simplify the data for privacy research"""'}), "(prog='connvis', description=\n 'Web-based conntrack that tries to simplify the data for privacy research')\n", (48, 157), False, 'import argparse\n'), ((401, 439), 'ipaddress.ip_network', 'ipaddress.ip_network', (['"""192.168.0.0/24"""'], {}), "('192.168.0.0/24')\n", (421, 439), False, 'import ipaddress\n'), ((461, 496), 'ipaddress.ip_address', 'ipaddress.ip_address', (['"""192.168.0.1"""'], {}), "('192.168.0.1')\n", (481, 496), False, 'import ipaddress\n')]
|
import collections
import cPickle as pickle
import glob
import itertools
import json
import operator
import os
import re
import sys
from program_synthesis.karel.dataset import dataset
from program_synthesis.karel.dataset import executor
from program_synthesis.karel.dataset.karel_runtime import KarelRuntime
from program_synthesis.karel.models import karel_model
from program_synthesis.common.tools.saver import restore_args
BASE_DIR = ""
with open(BASE_DIR + "text2code-models/karel-sgd-cl1-lr1-lds100k-ldr0.5/report-dev-00100100.jsonl") as f:
baseline_report = []
print(f.readline())
for line in f:
baseline_report.append(json.loads(line))
class Args(object):
model_dir = BASE_DIR + 'program_synthesis-models/karel-lgrl-ref-m123-sgd-cl1-lr0.1-lds100k-ldr0.5'
step = 250100
args = Args()
restore_args(args)
args.word_vocab = ',,/data/karel/word.vocab'
m = karel_model.KarelLGRLRefineModel(args)
batch_processor = m.batch_processor(for_eval=True)
m.args.max_beam_trees = 64
m.args.max_eval_trials = 64
i = 0
result = []
while i < len(baseline_report):
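    # Build batches of up to 32 baseline examples whose top candidate will be refined; examples where only one tree was checked are skipped.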
batch = []
while len(batch) < 32 and i < len(baseline_report):
if baseline_report[i]['code']['info']['trees_checked'] == 1:
i += 1
continue
e = dataset.KarelExample.from_dict(baseline_report[i]['example'])
ref_code_sequence = baseline_report[i]['code']['info']['candidates'][0]
e.ref_example = dataset.KarelExample(idx=None, guid=None, code_sequence=ref_code_sequence, input_tests=e.input_tests, tests=e.tests)
batch.append(e)
i += 1
print("Starting batch (%d)..." % i)
res = m.inference(batch_processor(batch))
for example, infer in zip(batch, res):
result.append((example, infer))
# if i > 100:
# break
print(len(result), len(baseline_report))
the_executor = executor.KarelExecutor()
stats = {'total': len(result), 'fixed': 0}
refinement_results = []
for example, infer in result:
if not infer.code_sequence:
continue
correct = True
for test in example.input_tests + example.tests:
try:
log = the_executor.execute(infer.code_sequence, None, test['input'])
if log.result != test['output']:
correct = False
break
except (executor.ExecutorRuntimeException, executor.ExecutorSyntaxException) as e:
correct = False
break
refinement_results.append(correct)
if correct:
stats['fixed'] += 1
print(float(stats['fixed']) / stats['total'], stats['fixed'], stats['total'])
|
[
"program_synthesis.karel.dataset.dataset.KarelExample.from_dict",
"program_synthesis.karel.dataset.dataset.KarelExample",
"json.loads",
"program_synthesis.common.tools.saver.restore_args",
"program_synthesis.karel.models.karel_model.KarelLGRLRefineModel",
"program_synthesis.karel.dataset.executor.KarelExecutor"
] |
[((822, 840), 'program_synthesis.common.tools.saver.restore_args', 'restore_args', (['args'], {}), '(args)\n', (834, 840), False, 'from program_synthesis.common.tools.saver import restore_args\n'), ((890, 928), 'program_synthesis.karel.models.karel_model.KarelLGRLRefineModel', 'karel_model.KarelLGRLRefineModel', (['args'], {}), '(args)\n', (922, 928), False, 'from program_synthesis.karel.models import karel_model\n'), ((1860, 1884), 'program_synthesis.karel.dataset.executor.KarelExecutor', 'executor.KarelExecutor', ([], {}), '()\n', (1882, 1884), False, 'from program_synthesis.karel.dataset import executor\n'), ((1280, 1341), 'program_synthesis.karel.dataset.dataset.KarelExample.from_dict', 'dataset.KarelExample.from_dict', (["baseline_report[i]['example']"], {}), "(baseline_report[i]['example'])\n", (1310, 1341), False, 'from program_synthesis.karel.dataset import dataset\n'), ((1446, 1566), 'program_synthesis.karel.dataset.dataset.KarelExample', 'dataset.KarelExample', ([], {'idx': 'None', 'guid': 'None', 'code_sequence': 'ref_code_sequence', 'input_tests': 'e.input_tests', 'tests': 'e.tests'}), '(idx=None, guid=None, code_sequence=ref_code_sequence,\n input_tests=e.input_tests, tests=e.tests)\n', (1466, 1566), False, 'from program_synthesis.karel.dataset import dataset\n'), ((647, 663), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (657, 663), False, 'import json\n')]
|
import pyximport;
pyximport.install(setup_args = {"script_args" : ["--force"]},
language_level=3)
import unittest
import uttemplate
import cymapinterfacetester as cyt
from cykhash import Int64to64Map, Int32to32Map, Float64to64Map, Float32to32Map, PyObjectMap
AS_LIST = {'int64' : cyt.as_py_list_int64,
'int32' : cyt.as_py_list_int32,
'float64' : cyt.as_py_list_int64_float64,
'float32' : cyt.as_py_list_int32_float32,
'object' : cyt.as_py_list_pyobject,
}
USE_INT = {'int64' : cyt.use_int64,
'int32' : cyt.use_int32,
'float64' : cyt.use_int64_float64,
'float32' : cyt.use_int32_float32,
'object' : cyt.use_pyobject,
}
USE_FLOAT = {'int64' : cyt.use_float64,
'int32' : cyt.use_float32,
'float64' : cyt.use_float64_float64,
'float32' : cyt.use_float32_float32,
'object' : cyt.use_pyobject,
}
MAP = {'int64' : Int64to64Map,
'int32' : Int32to32Map,
'float64' : Float64to64Map,
'float32' : Float32to32Map,
'object' : PyObjectMap,
}
#just making sure the interface can be accessed:
@uttemplate.from_templates(['int64',
'int32',
'float64',
'float32',
])
class CyMypInterfaceTester(unittest.TestCase):
def template_cimport_use_int(self, map_type):
received=USE_INT[map_type]([1,2,3,4], [5,6,7,8], [2,3])
expected=[6,7]
self.assertEqual(received, expected)
def template_cimport_use_float(self, map_type):
received=USE_FLOAT[map_type]([1,2,3,4], [5.5,6.5,7.5,8.5], [2,3])
expected=[6.5,7.5]
self.assertEqual(received, expected)
def template_as_py_list(self, map_type):
cy_map = MAP[map_type]()
cy_map[3] = 20
lst = AS_LIST[map_type](cy_map)
self.assertEqual(lst, [3,20])
def template_as_py_list_2(self, map_type):
cy_map = MAP[map_type]()
cy_map[3] = 5
cy_map[4] = 6
lst = AS_LIST[map_type](cy_map)
self.assertEqual(set(lst), set([3,4,5,6]))
|
[
"pyximport.install",
"uttemplate.from_templates"
] |
[((19, 95), 'pyximport.install', 'pyximport.install', ([], {'setup_args': "{'script_args': ['--force']}", 'language_level': '(3)'}), "(setup_args={'script_args': ['--force']}, language_level=3)\n", (36, 95), False, 'import pyximport\n'), ((1290, 1357), 'uttemplate.from_templates', 'uttemplate.from_templates', (["['int64', 'int32', 'float64', 'float32']"], {}), "(['int64', 'int32', 'float64', 'float32'])\n", (1315, 1357), False, 'import uttemplate\n')]
|
import pytest
from tests.util import createNode
node = createNode()
@pytest.mark.query
def test_getzmqnotifications(): # 01
zmq = node.zmq.getzmqnotifications()
assert zmq or zmq == []
|
[
"tests.util.createNode"
] |
[((56, 68), 'tests.util.createNode', 'createNode', ([], {}), '()\n', (66, 68), False, 'from tests.util import createNode\n')]
|
from pandas._config.config import reset_option
from preprocess.load_data.data_loader import load_hotel_reserve
import pandas as pd
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
def main():
"""全結合処理
顧客ごとに2017年1月〜2017年3月の月間合計利用金額を計算
利用がない日は0とする
日付はチェックイン日付を利用する
"""
customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
print(reserve_tb)
print(customer_tb)
    # Build the date (month) master table
month_mst = pd.DataFrame({'year_month': [(date(2017, 1, 1) + relativedelta(months=x)).strftime("%Y%m") for x in range(0, 3)]})
# month_mst['year_month'] = pd.to_datetime(month_mst['year_month'])
    # Prepare a join key with the same constant value on both tables for the cross join
customer_tb['join_key'] = 0
month_mst['join_key'] = 0
    # Cross-join the customer table with the month master table
customer_mst = pd.merge(customer_tb[['customer_id', 'join_key']], month_mst, on='join_key')
customer_mst.info()
    # Prepare the year-month join key on the reservation table
reserve_tb['year_month'] = reserve_tb['checkin_date'] \
.apply(lambda x: pd.to_datetime(x).strftime("%Y%m"))
reserve_tb.info()
    # Join with the reservation records and compute the total reservation amount
    # TODO: the join on year is not working (records end up joined at month granularity only)
summary_result = pd.merge(
customer_mst,
reserve_tb[['customer_id', 'year_month', 'total_price']],
on=['customer_id', 'year_month'],
how='left'
).groupby(['customer_id', 'year_month'])['total_price'].sum().reset_index()
    # Convert the total from missing (NaN) to 0 when there were no reservation records
summary_result.fillna(0, inplace=True)
print(summary_result.query('customer_id == "c_1"'))
print(reserve_tb.query('customer_id == "c_1"'))
if __name__ == '__main__':
main()
|
[
"pandas.merge",
"preprocess.load_data.data_loader.load_hotel_reserve",
"dateutil.relativedelta.relativedelta",
"datetime.date",
"pandas.to_datetime"
] |
[((364, 384), 'preprocess.load_data.data_loader.load_hotel_reserve', 'load_hotel_reserve', ([], {}), '()\n', (382, 384), False, 'from preprocess.load_data.data_loader import load_hotel_reserve\n'), ((792, 868), 'pandas.merge', 'pd.merge', (["customer_tb[['customer_id', 'join_key']]", 'month_mst'], {'on': '"""join_key"""'}), "(customer_tb[['customer_id', 'join_key']], month_mst, on='join_key')\n", (800, 868), True, 'import pandas as pd\n'), ((1005, 1022), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (1019, 1022), True, 'import pandas as pd\n'), ((492, 508), 'datetime.date', 'date', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (496, 508), False, 'from datetime import date, datetime\n'), ((511, 534), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': 'x'}), '(months=x)\n', (524, 534), False, 'from dateutil.relativedelta import relativedelta\n'), ((1155, 1285), 'pandas.merge', 'pd.merge', (['customer_mst', "reserve_tb[['customer_id', 'year_month', 'total_price']]"], {'on': "['customer_id', 'year_month']", 'how': '"""left"""'}), "(customer_mst, reserve_tb[['customer_id', 'year_month',\n 'total_price']], on=['customer_id', 'year_month'], how='left')\n", (1163, 1285), True, 'import pandas as pd\n')]
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path as op
from .rest_conf import RestmapConf, WebConf
__all__ = ["RestBuilderError", "RestBuilder"]
class RestBuilderError(Exception):
pass
class _RestBuilderOutput:
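    # Accumulates generated file contents keyed by output path; save() joins and writes them to disk.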
readme = "README"
default = "default"
bin = "bin"
def __init__(self, path, product):
self._path = path
self._product = product
self._root_path = op.abspath(self._path)
if not op.isdir(self._root_path):
os.makedirs(self._root_path)
self._content = {}
def put(self, subpath, file_name, content):
path = op.join(self._root_path, subpath)
if not op.isdir(path):
os.makedirs(path)
full_name = op.join(path, file_name)
if full_name not in self._content:
self._content[full_name] = []
self._content[full_name].append(content)
def save(self):
for full_name, contents in list(self._content.items()):
full_content = "\n\n".join(contents)
with open(full_name, "w") as f:
f.writelines(full_content)
class RestBuilder:
def __init__(self, schema, handler, output_path, *args, **kwargs):
"""
:param schema:
:param schema: RestSchema
:param handler:
:param output_path:
:param args:
:param kwargs:
"""
self._schema = schema
self._handler = handler
self._output_path = output_path
self._args = args
self._kwargs = kwargs
self.output = _RestBuilderOutput(
self._output_path,
self._schema.product,
)
@property
def restmap_admin(self):
return self._schema.namespace
@property
def restmap_admin_externals(self):
return RestmapConf.admin_externals(self._schema.endpoints)
def build(self):
for endpoint in self._schema.endpoints:
# If the endpoint is oauth, which is for getting accesstoken. Conf file entries should not get created.
if endpoint._name != "oauth":
if endpoint._name == "settings":
self.output.put(
self.output.default,
endpoint.conf_name + ".conf",
endpoint.generate_default_conf(),
)
self.output.put(
self.output.readme,
endpoint.conf_name + ".conf.spec",
endpoint.generate_spec(),
)
# Add data input of self defined conf to inputs.conf.spec
if endpoint._entities[0] and endpoint._entities[0]._conf_name:
lines = [
"[" + endpoint._name + "://<name>]",
"placeholder = placeholder",
]
self.output.put(
self.output.readme, "inputs.conf.spec", "\n".join(lines)
)
self.output.put(
self.output.bin,
endpoint.rh_name + ".py",
endpoint.generate_rh(self._handler),
)
self.output.put(
self.output.default,
"restmap.conf",
RestmapConf.build(
self._schema.endpoints,
self._schema.namespace,
self._schema.admin_match,
),
)
self.output.put(
self.output.default,
"web.conf",
WebConf.build(self._schema.endpoints),
)
self.output.save()
|
[
"os.path.isdir",
"os.path.abspath",
"os.path.join",
"os.makedirs"
] |
[((961, 983), 'os.path.abspath', 'op.abspath', (['self._path'], {}), '(self._path)\n', (971, 983), True, 'import os.path as op\n'), ((1158, 1191), 'os.path.join', 'op.join', (['self._root_path', 'subpath'], {}), '(self._root_path, subpath)\n', (1165, 1191), True, 'import os.path as op\n'), ((1273, 1297), 'os.path.join', 'op.join', (['path', 'file_name'], {}), '(path, file_name)\n', (1280, 1297), True, 'import os.path as op\n'), ((999, 1024), 'os.path.isdir', 'op.isdir', (['self._root_path'], {}), '(self._root_path)\n', (1007, 1024), True, 'import os.path as op\n'), ((1038, 1066), 'os.makedirs', 'os.makedirs', (['self._root_path'], {}), '(self._root_path)\n', (1049, 1066), False, 'import os\n'), ((1207, 1221), 'os.path.isdir', 'op.isdir', (['path'], {}), '(path)\n', (1215, 1221), True, 'import os.path as op\n'), ((1235, 1252), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1246, 1252), False, 'import os\n')]
|
"""Estimate human performance for the scruples resource."""
import json
import logging
import click
from ....baselines.metrics import METRICS
logger = logging.getLogger(__name__)
# main function
@click.command()
@click.argument(
'split_path',
type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
'output_path',
type=click.Path(exists=False, file_okay=True, dir_okay=False))
def human_performance(
split_path: str,
output_path: str
) -> None:
"""Estimate human performance on the scruples resource.
Read in the split from SPLIT_PATH, then estimate human performance
metrics and write them to OUTPUT_PATH.
Human performance is computed by comparing the majority vote label
of the human performance annotators to the majority vote label of
the gold annotators.
"""
logger.info('Computing human performance.')
human_preds = []
gold_labels = []
with click.open_file(split_path, 'r') as split_file:
for ln in split_file:
row = json.loads(ln)
human_preds.append(row['human_perf_label'])
gold_labels.append(row['gold_label'])
with open(output_path, 'w') as metrics_file:
json.dump({
key: metric(
y_true=gold_labels,
y_pred=human_preds)
for key, (_, metric, scorer_kwargs) in METRICS.items()
if not scorer_kwargs['needs_proba']
}, metrics_file)
|
[
"json.loads",
"click.open_file",
"click.command",
"click.Path",
"logging.getLogger"
] |
[((156, 183), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (173, 183), False, 'import logging\n'), ((204, 219), 'click.command', 'click.command', ([], {}), '()\n', (217, 219), False, 'import click\n'), ((958, 990), 'click.open_file', 'click.open_file', (['split_path', '"""r"""'], {}), "(split_path, 'r')\n", (973, 990), False, 'import click\n'), ((264, 319), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(True)', 'dir_okay': '(False)'}), '(exists=True, file_okay=True, dir_okay=False)\n', (274, 319), False, 'import click\n'), ((366, 422), 'click.Path', 'click.Path', ([], {'exists': '(False)', 'file_okay': '(True)', 'dir_okay': '(False)'}), '(exists=False, file_okay=True, dir_okay=False)\n', (376, 422), False, 'import click\n'), ((1054, 1068), 'json.loads', 'json.loads', (['ln'], {}), '(ln)\n', (1064, 1068), False, 'import json\n')]
|
import unittest
from datetime import datetime
from target_bigquery import stream_utils
class TestStreamUtils(unittest.TestCase):
"""
Unit Tests
"""
def test_add_metadata_values_to_record(self):
"""Test adding metadata"""
dt = "2017-11-20T16:45:33.000Z"
record = { "type": "RECORD", "stream": "foo", "time_extracted": dt, "record": {"id": "2"} }
result = stream_utils.add_metadata_values_to_record(record)
self.assertEqual(result.get("id"), "2")
self.assertEqual(result.get("_sdc_extracted_at"), datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S.%fZ'))
extra_attrs = ['_sdc_batched_at', '_sdc_deleted_at']
for attr in extra_attrs:
self.assertTrue(attr in result)
def test_add_metadata_values_to_record_when_no_time_extracted(self):
"""Test adding metadata when there's no time extracted in the record message """
record = { "type": "RECORD", "stream": "foo", "record": {"id": "2"} }
dt = datetime.now()
result = stream_utils.add_metadata_values_to_record(record)
self.assertEqual(result.get("id"), "2")
self.assertGreaterEqual(result.get("_sdc_extracted_at"), dt)
extra_attrs = ['_sdc_extracted_at', '_sdc_batched_at', '_sdc_deleted_at']
for attr in extra_attrs:
self.assertTrue(attr in result)
|
[
"datetime.datetime.strptime",
"datetime.datetime.now",
"target_bigquery.stream_utils.add_metadata_values_to_record"
] |
[((406, 456), 'target_bigquery.stream_utils.add_metadata_values_to_record', 'stream_utils.add_metadata_values_to_record', (['record'], {}), '(record)\n', (448, 456), False, 'from target_bigquery import stream_utils\n'), ((1007, 1021), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1019, 1021), False, 'from datetime import datetime\n'), ((1039, 1089), 'target_bigquery.stream_utils.add_metadata_values_to_record', 'stream_utils.add_metadata_values_to_record', (['record'], {}), '(record)\n', (1081, 1089), False, 'from target_bigquery import stream_utils\n'), ((564, 610), 'datetime.datetime.strptime', 'datetime.strptime', (['dt', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(dt, '%Y-%m-%dT%H:%M:%S.%fZ')\n", (581, 610), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Canale per I <NAME>
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
from core import httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
__channel__ = "hokutonoken"
def mainlist(item):
logger.info("[hokutonoken.py] mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Hokuto no Ken - Prima Serie[/COLOR]",
action="episodi",
url="http://pastebin.com/BUqD13hb",
thumbnail="http://i.imgur.com/MGkqu7c.jpg",
fanart="http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg"),
Item(channel=__channel__,
title="[COLOR azure]Hokuto no Ken - Seconda Serie[/COLOR]",
action="episodi",
url="http://pastebin.com/mHXQRBxZ",
thumbnail="http://i159.photobucket.com/albums/t123/Janthem/hnk2.jpg",
fanart="http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg")]
return itemlist
def episodi(item):
logger.info("hokutonoken.py episodi")
itemlist = []
# Downloads page
data = httptools.downloadpage(item.url).data
# Extracts the entries
patron = '><br>(.*?)<a href="(.*?)" target="_blank">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="findvid",
title=scrapedtitle,
thumbnail=item.thumbnail,
url=scrapedurl))
return itemlist
def findvid(item):
logger.info("[pastebin.py] findvideos")
# Downloads page
data = item.url
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
|
[
"core.item.Item",
"core.httptools.downloadpage",
"platformcode.logger.info",
"core.scrapertools.decodeHtmlentities",
"core.servertools.find_video_items",
"re.compile"
] |
[((519, 559), 'platformcode.logger.info', 'logger.info', (['"""[hokutonoken.py] mainlist"""'], {}), "('[hokutonoken.py] mainlist')\n", (530, 559), False, 'from platformcode import logger\n'), ((1427, 1464), 'platformcode.logger.info', 'logger.info', (['"""hokutonoken.py episodi"""'], {}), "('hokutonoken.py episodi')\n", (1438, 1464), False, 'from platformcode import logger\n'), ((2103, 2142), 'platformcode.logger.info', 'logger.info', (['"""[pastebin.py] findvideos"""'], {}), "('[pastebin.py] findvideos')\n", (2114, 2142), False, 'from platformcode import logger\n'), ((2201, 2240), 'core.servertools.find_video_items', 'servertools.find_video_items', ([], {'data': 'data'}), '(data=data)\n', (2229, 2240), False, 'from core import servertools\n'), ((577, 874), 'core.item.Item', 'Item', ([], {'channel': '__channel__', 'title': '"""[COLOR azure]Hokuto no Ken - Prima Serie[/COLOR]"""', 'action': '"""episodi"""', 'url': '"""http://pastebin.com/BUqD13hb"""', 'thumbnail': '"""http://i.imgur.com/MGkqu7c.jpg"""', 'fanart': '"""http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg"""'}), "(channel=__channel__, title=\n '[COLOR azure]Hokuto no Ken - Prima Serie[/COLOR]', action='episodi',\n url='http://pastebin.com/BUqD13hb', thumbnail=\n 'http://i.imgur.com/MGkqu7c.jpg', fanart=\n 'http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg'\n )\n", (581, 874), False, 'from core.item import Item\n'), ((973, 1298), 'core.item.Item', 'Item', ([], {'channel': '__channel__', 'title': '"""[COLOR azure]Hokuto no Ken - Seconda Serie[/COLOR]"""', 'action': '"""episodi"""', 'url': '"""http://pastebin.com/mHXQRBxZ"""', 'thumbnail': '"""http://i159.photobucket.com/albums/t123/Janthem/hnk2.jpg"""', 'fanart': '"""http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg"""'}), "(channel=__channel__, title=\n '[COLOR azure]Hokuto no Ken - Seconda Serie[/COLOR]', action='episodi',\n url='http://pastebin.com/mHXQRBxZ', thumbnail=\n 'http://i159.photobucket.com/albums/t123/Janthem/hnk2.jpg', fanart=\n 'http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg'\n )\n", (977, 1298), False, 'from core.item import Item\n'), ((1517, 1549), 'core.httptools.downloadpage', 'httptools.downloadpage', (['item.url'], {}), '(item.url)\n', (1539, 1549), False, 'from core import httptools\n'), ((1799, 1844), 'core.scrapertools.decodeHtmlentities', 'scrapertools.decodeHtmlentities', (['scrapedtitle'], {}), '(scrapedtitle)\n', (1830, 1844), False, 'from core import scrapertools\n'), ((1686, 1715), 're.compile', 're.compile', (['patron', 're.DOTALL'], {}), '(patron, re.DOTALL)\n', (1696, 1715), False, 'import re\n'), ((1882, 1992), 'core.item.Item', 'Item', ([], {'channel': '__channel__', 'action': '"""findvid"""', 'title': 'scrapedtitle', 'thumbnail': 'item.thumbnail', 'url': 'scrapedurl'}), "(channel=__channel__, action='findvid', title=scrapedtitle, thumbnail=\n item.thumbnail, url=scrapedurl)\n", (1886, 1992), False, 'from core.item import Item\n')]
|
import os
import json, boto3
def lambda_handler(event, context):
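    # CodeDeploy lifecycle hook: find the blue/green target group currently used by the HTTP listener, point every HTTPS listener rule at it, then report the hook status back to CodeDeploy.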
print("Trigger Event: ")
print(event)
region = os.environ['REGION']
elbv2_client = boto3.client('elbv2', region_name=region)
available_target_groups = os.environ['AVAILABLE_TARGET_GROUPS']
arr_available_target_groups = available_target_groups.split(',')
# Get HTTP Target Group.
http_listener_arn = os.environ['HTTP_LISTENER_ARN']
http_listener = elbv2_client.describe_rules( ListenerArn=http_listener_arn)
http_target_group_arn = get_current_http_target_group(http_listener['Rules'], arr_available_target_groups)
if http_target_group_arn==False:
print("Could not identify the target arn")
return False
print("Current HTTP target group: ")
print(http_target_group_arn)
# Get HTTPS listener rules.
https_listener_arn = os.environ['SSL_LISTENER_ARN']
https_listener = elbv2_client.describe_rules(ListenerArn=https_listener_arn)
https_listener_rules = https_listener['Rules']
print("Current HTTPS target group: ")
https_target_group_arn = get_current_http_target_group(https_listener['Rules'], arr_available_target_groups)
print(https_target_group_arn)
results = {}
i = 0
while i < len(https_listener_rules):
# Skip default rule
if https_listener_rules[i]['IsDefault']==True:
i +=1
continue
actions = https_listener_rules[i]['Actions']
actions, modify = process_actions(actions, http_target_group_arn, arr_available_target_groups)
if modify==1:
print("Updating SSL listener rules..")
rule_arn = https_listener_rules[i]['RuleArn']
results[rule_arn] = modify_rules(elbv2_client, rule_arn, actions)
i +=1
# For ECS After Allow Test Traffic hook
print(results)
send_codedeploy_validation_status(event, results)
return results
# Returns the current B/G target group from a list of lister rules.
def get_current_http_target_group(http_listener_rules, arr_available_target_groups):
i=0
while i < len(http_listener_rules):
# Continue if default listener rule.
if http_listener_rules[i]['IsDefault']==True:
i +=1
continue
actions = http_listener_rules[i]['Actions']
n=0
while n<len(actions):
try:
for tg in actions[n]['ForwardConfig']['TargetGroups']:
                    if tg['TargetGroupArn'] in arr_available_target_groups and (tg['Weight'] == 100 or tg['Weight'] == 1):
return tg['TargetGroupArn']
except Exception as e:
print(e)
n +=1
i +=1
return False
def process_actions(actions, http_target_group_arn, arr_available_target_groups):
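    # Rewrite forward actions that reference one of the managed target groups so they point at the HTTP listener's current target group; returns the actions and a flag indicating whether anything changed.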
modify = 0
for ak, action in enumerate(actions):
try:
if action['Type'] == "forward" and check_target_update(action['TargetGroupArn'], arr_available_target_groups):
actions[ak]['TargetGroupArn']=http_target_group_arn
for tgk, target_group in enumerate(action['ForwardConfig']['TargetGroups']):
if check_target_update(target_group['TargetGroupArn'], arr_available_target_groups):
actions[ak]['ForwardConfig']['TargetGroups'][tgk]['TargetGroupArn']=http_target_group_arn
modify=1
except Exception as e:
print(e)
return (actions), modify
# Check old target group is associated w/out available target and different.
# Be wary I found its possible the Listener rule is updated at the initial Ready Stage.
# DO NOT TRY COMPARING OLD AN NEW, SIMPLY ALWAYS UPDATE TO MATCH HTTP IF ONE OF THE AVAILABLE TARGETS
def check_target_update(old_target_group, arr_available_target_groups):
return old_target_group in arr_available_target_groups
# Sends notification to CodeDeploy on hook status...
def send_codedeploy_validation_status(event, results):
region = os.environ['REGION']
codedeploy_client = boto3.client('codedeploy', region_name=region)
status = ('Failed', 'Succeeded')[len(results) > 0]
print(status)
try:
return codedeploy_client.put_lifecycle_event_hook_execution_status(
deploymentId=event['DeploymentId'],
lifecycleEventHookExecutionId=event['LifecycleEventHookExecutionId'],
status=status
)
except Exception as e:
print("Recoverable Exception: ")
print(e)
return False
def modify_rules(elbv2_client, arn, actions):
try:
return elbv2_client.modify_rule(
RuleArn=arn,
Actions=actions
)
except Exception as e:
print(e)
|
[
"boto3.client"
] |
[((165, 206), 'boto3.client', 'boto3.client', (['"""elbv2"""'], {'region_name': 'region'}), "('elbv2', region_name=region)\n", (177, 206), False, 'import json, boto3\n'), ((4075, 4121), 'boto3.client', 'boto3.client', (['"""codedeploy"""'], {'region_name': 'region'}), "('codedeploy', region_name=region)\n", (4087, 4121), False, 'import json, boto3\n')]
|
# worldtime module by CantSayIHave
# Created 2018/01/12
#
# Fetch time and date from a location
# Uses Google Geocoding API and Google Time Zone API
import aiohttp
import time
from datetime import datetime
API_GEOCODE = 'https://maps.googleapis.com/maps/api/geocode/json?'
API_TIMEZONE = 'https://maps.googleapis.com/maps/api/timezone/json?'
allowed_chars = [',', '%', '+', '-']
class Location:
def __init__(self, address: str, lat: int = None, long: int = None):
self.address = address
self.latitude = lat
self.longitude = long
class Time:
def __init__(self, time: datetime, timezone_id: str, timezone_name: str):
self.time = time
self.timezone_id = timezone_id
self.timezone_name = timezone_name
class WorldTime:
def __init__(self, key: str):
self.key = key
async def get_location(self, query: str) -> Location:
args = {'address': self.query_encode(query),
'key': self.key}
url = API_GEOCODE + self.param_encode(args)
search = await self.api_get(url)
if search:
try:
result = search['results'][0]
location = Location(address=result['formatted_address'],
lat=result['geometry']['location']['lat'],
long=result['geometry']['location']['lng'])
return location
except KeyError:
print('WorldTime Location Key Error')
raise
else:
return search
async def get_time(self, location: Location) -> Time:
unix_now = int(time.time())
args = {'location': '{},{}'.format(location.latitude, location.longitude),
'timestamp': unix_now,
'key': self.key}
url = API_TIMEZONE + self.param_encode(args)
search = await self.api_get(url)
if search:
try:
location_time = unix_now + search['rawOffset'] + search['dstOffset']
return Time(time=datetime.fromtimestamp(location_time),
timezone_id=search['timeZoneId'],
timezone_name=search['timeZoneName'])
except KeyError:
print('WorldTime Time Key Error')
raise
else:
return search
@staticmethod
async def api_get(url: str) -> dict:
        async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
j = await resp.json()
if j['status'] == 'OK':
return j
elif j['status'] == 'ZERO_RESULTS':
return None
return False
@staticmethod
def query_encode(text: str) -> str:
text = ' '.join(text.split())
text = text.replace(' ', '+')
for c in text:
if c not in allowed_chars and not c.isalnum():
text = text.replace(c, '%' + hex(ord(c))[2:])
return text
@staticmethod
def param_encode(options: dict) -> str:
out = ''
for k, v in options.items():
out += '{}={}&'.format(k, v)
out = out[:-1]
return out
|
[
"aiohttp.ClientSession",
"datetime.datetime.fromtimestamp",
"time.time"
] |
[((1652, 1663), 'time.time', 'time.time', ([], {}), '()\n', (1661, 1663), False, 'import time\n'), ((2454, 2477), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (2475, 2477), False, 'import aiohttp\n'), ((2072, 2109), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['location_time'], {}), '(location_time)\n', (2094, 2109), False, 'from datetime import datetime\n')]
|
import numpy as np
from matplotlib import pyplot as plt
n = 100
x = range(0,n)
y = list(range(0,n))  # use a list so the values can be modified in the loop below
for k in range(0, n):
y[k] = y[k] + 3*np.random.randn() + 100
plt.figure(figsize=(20,10))
plt.scatter(x, y)
plt.savefig("./images/rawData.png")
X = np.zeros([n,1])
target = np.zeros([n,1])
X[:,0] = x
target[:,0] = y
np.savetxt("X.txt", X, delimiter=",", fmt='%f')
np.savetxt("y.txt", target, delimiter=",", fmt='%f')
|
[
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig"
] |
[((162, 190), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (172, 190), True, 'from matplotlib import pyplot as plt\n'), ((190, 207), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (201, 207), True, 'from matplotlib import pyplot as plt\n'), ((208, 243), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/rawData.png"""'], {}), "('./images/rawData.png')\n", (219, 243), True, 'from matplotlib import pyplot as plt\n'), ((249, 265), 'numpy.zeros', 'np.zeros', (['[n, 1]'], {}), '([n, 1])\n', (257, 265), True, 'import numpy as np\n'), ((274, 290), 'numpy.zeros', 'np.zeros', (['[n, 1]'], {}), '([n, 1])\n', (282, 290), True, 'import numpy as np\n'), ((317, 364), 'numpy.savetxt', 'np.savetxt', (['"""X.txt"""', 'X'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "('X.txt', X, delimiter=',', fmt='%f')\n", (327, 364), True, 'import numpy as np\n'), ((365, 417), 'numpy.savetxt', 'np.savetxt', (['"""y.txt"""', 'target'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "('y.txt', target, delimiter=',', fmt='%f')\n", (375, 417), True, 'import numpy as np\n'), ((137, 154), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (152, 154), True, 'import numpy as np\n')]
|
from time import timezone
import pandas as pd
from datetime import datetime
# print(datetime.fromtimestamp(1603209600))
# print(datetime.fromtimestamp(1612868324294/1000))
# print(datetime.fromtimestamp(1613283396746//1000))
print(datetime.fromtimestamp(1640851200))
print(datetime.fromtimestamp(1640649600))
print(datetime.fromtimestamp(1640617020000//1000))
a = datetime.now()
b = pd.Timestamp(ts_input=a, tzinfo=a.tzinfo)
c = b.floor(freq='T')
d = b.ceil(freq='T')
e = d.timestamp()
f = int(e)
g = datetime.fromtimestamp(f)
print(a, c, d, g)
delta = datetime.now() - datetime.utcnow()
print(delta.seconds / 3600)
|
[
"datetime.datetime.utcnow",
"pandas.Timestamp",
"datetime.datetime.now",
"datetime.datetime.fromtimestamp"
] |
[((365, 379), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (377, 379), False, 'from datetime import datetime\n'), ((384, 425), 'pandas.Timestamp', 'pd.Timestamp', ([], {'ts_input': 'a', 'tzinfo': 'a.tzinfo'}), '(ts_input=a, tzinfo=a.tzinfo)\n', (396, 425), True, 'import pandas as pd\n'), ((502, 527), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['f'], {}), '(f)\n', (524, 527), False, 'from datetime import datetime\n'), ((232, 266), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1640851200)'], {}), '(1640851200)\n', (254, 266), False, 'from datetime import datetime\n'), ((274, 308), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1640649600)'], {}), '(1640649600)\n', (296, 308), False, 'from datetime import datetime\n'), ((316, 361), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1640617020000 // 1000)'], {}), '(1640617020000 // 1000)\n', (338, 361), False, 'from datetime import datetime\n'), ((555, 569), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (567, 569), False, 'from datetime import datetime\n'), ((572, 589), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (587, 589), False, 'from datetime import datetime\n')]
|
import pytest
from now_lms import init_app, lms_app
lms_app.app_context().push()
@pytest.fixture(scope="package", autouse=True)
def setup_database():
init_app()
|
[
"now_lms.init_app",
"pytest.fixture",
"now_lms.lms_app.app_context"
] |
[((85, 130), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""package"""', 'autouse': '(True)'}), "(scope='package', autouse=True)\n", (99, 130), False, 'import pytest\n'), ((157, 167), 'now_lms.init_app', 'init_app', ([], {}), '()\n', (165, 167), False, 'from now_lms import init_app, lms_app\n'), ((54, 75), 'now_lms.lms_app.app_context', 'lms_app.app_context', ([], {}), '()\n', (73, 75), False, 'from now_lms import init_app, lms_app\n')]
|
"""Sync local directories with neocities.org sites."""
import os
import sys
from . import cmdline
from . import local
from .config import load_config_file
from .ignore_files import IgnoreFiles
from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info)
from .neocities import Neocities
from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions
from .utils import Pushd
def main():
"""Program entry-point."""
cmdline_opts = cmdline.parse(sys.argv[1:])
if cmdline_opts.quietness > 0:
for _ in range(cmdline_opts.quietness):
decrease_verbosity()
elif cmdline_opts.quietness < 0:
for _ in range(-cmdline_opts.quietness):
increase_verbosity()
try:
conf = load_config_file(cmdline_opts.config_file)
except FileNotFoundError:
fatal(f'Config file "{cmdline_opts.config_file}" not found. Run again with "--help" for more info.')
exit(1)
for site, site_conf in conf.items():
client = Neocities(site_conf.api_key)
with Pushd(os.path.expanduser(site_conf.root_dir)):
info(f'Starting sync for site "{site}".')
info("Listing local file tree...")
local_filetree = local.filetree(".")
local_filetree = IgnoreFiles(site_conf).filter(local_filetree)
info(
f"Local file tree has {local_filetree.number_of_files()} file(s)"
f" and {local_filetree.number_of_directories()} dir(s)."
)
info("Fetching remote file tree...")
remote_filetree = client.list()
info(
f"Remote file tree has {remote_filetree.number_of_files()}"
f" file(s) and {remote_filetree.number_of_directories()} dir(s)."
)
info("Comparing file trees...")
applied_actions = 0
for action in sync_actions(local_filetree, remote_filetree):
try:
if isinstance(action, UpdateRemote):
info(f'Updating remote file "{action.path}": {action.reason}.')
if not cmdline_opts.dry_run:
client.upload(action.path)
applied_actions += 1
elif isinstance(action, DeleteRemote):
info(f'Deleting remote file "{action.path}": {action.reason}.')
if not cmdline_opts.dry_run:
client.delete(action.path)
applied_actions += 1
elif isinstance(action, DoNothing):
debug(f'Skipping "{action.path}": {action.reason}.')
else:
raise RuntimeError(f"Unknown action {action.__class__.__name__}.")
except Exception as e: # noqa: B902
error(f"Error while syncing: {e}")
exit(1)
if not cmdline_opts.dry_run:
info(f"Applied {applied_actions} action(s).")
else:
info(f"Would apply {applied_actions} action(s).")
if site_conf.remove_empty_dirs:
info("Searching for empty directories...")
remote_filetree = client.list()
empty_directories = remote_filetree.list_empty_directories()
info(f"Found {len(empty_directories)} empty dir(s).")
for empty_dir in sorted(empty_directories, reverse=True):
info(f'Deleting remote empty directory "{empty_dir}"')
if not cmdline_opts.dry_run:
client.delete(empty_dir)
info(f'Finished sync for site "{site}".')
if __name__ == "__main__":
main()
|
[
"os.path.expanduser"
] |
[((1074, 1112), 'os.path.expanduser', 'os.path.expanduser', (['site_conf.root_dir'], {}), '(site_conf.root_dir)\n', (1092, 1112), False, 'import os\n')]
|
import warnings
from typing import Type, Union
def will_be_removed(
deprecated_name: str,
use_instead: Union[str, Type],
removing_in_version: str,
stacklevel=2,
):
new_class_name = (
use_instead.__name__ # type: ignore
if isinstance(use_instead, Type) # type: ignore
else use_instead
)
warnings.warn(
f"Please use {new_class_name} instead, "
f"{deprecated_name} will be removed in happyly v{removing_in_version}.",
DeprecationWarning,
stacklevel=stacklevel,
)
|
[
"warnings.warn"
] |
[((342, 518), 'warnings.warn', 'warnings.warn', (['f"""Please use {new_class_name} instead, {deprecated_name} will be removed in happyly v{removing_in_version}."""', 'DeprecationWarning'], {'stacklevel': 'stacklevel'}), "(\n f'Please use {new_class_name} instead, {deprecated_name} will be removed in happyly v{removing_in_version}.'\n , DeprecationWarning, stacklevel=stacklevel)\n", (355, 518), False, 'import warnings\n')]
|
import re
string = input()
template = r'never gonna let you down...'
match = re.match(template, string, flags=re.IGNORECASE)
|
[
"re.match"
] |
[((79, 126), 're.match', 're.match', (['template', 'string'], {'flags': 're.IGNORECASE'}), '(template, string, flags=re.IGNORECASE)\n', (87, 126), False, 'import re\n')]
|
from __future__ import print_function
import time
import os
import sys
import logging
import json
import tensorflow as tf
import numpy as np
import cv2
import data.data_loader as loader
from models.cgan_model import cgan
from models.ops import *
os.system('http_proxy_on')
def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300):
'''
return decayed learning rate
It becomes 0 at end_step
'''
decay_period = end_step - start_step
step_decay = (initial-0.0)/decay_period
update_step = max(0, step-start_step)
current_value = max(0, initial - (update_step)*step_decay)
return current_value
def train(args):
#assume there is a batch data pair:
dataset = loader.read_data_path(args.data_path_train, name=args.data_name)
num_dataset = len(dataset)
num_batch = num_dataset/args.batch_num
sess = tf.Session()
model = cgan(sess, args)
model.build_model()
model.sess.run(tf.global_variables_initializer())
model.load_weights(args.checkpoint_dir)
for iter in range(args.epoch):
learning_rate = linear_decay(0.0001, iter)
for i, data in enumerate(dataset):
blur_img, real_img = loader.read_image_pair(data,
resize_or_crop = args.resize_or_crop,
image_size=(args.img_h, args.img_w))
feed_dict = {model.input['blur_img']: blur_img,\
model.input['real_img']: real_img,\
model.learning_rate: learning_rate}
loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict)
logging.info('%d epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f',\
iter, i, loss_G, adv_loss, perceptual_loss)
#Ready for Training Discriminator
for _ in range(args.iter_disc):
loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor)
logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp)
if (iter+1) % 50 == 0 or iter == (args.epoch-1):
model.save_weights(args.checkpoint_dir, model.global_step)
logging.info("[!] test started")
dataset = loader.read_data_path(args.data_path_test, name=args.data_name)
for i, data in enumerate(dataset):
if not os.path.exists('./test_result'):
os.mkdir('./test_result')
blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop,
image_size=(args.img_h, args.img_w))
feed_dict_G = {model.input['blur_img']: blur_img}
G_out = model.G_output(feed_dict=feed_dict_G)
cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.)
cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.)
cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.)
logging.info("Deblur Image is saved (%d/%d) ", i, len(dataset))
logging.info("[*] test done")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--iter_gen', type=int, default=1)
parser.add_argument('--iter_disc', type=int, default=5)
parser.add_argument('--batch_num', type=int, default=1)
parser.add_argument('--epoch', type=int, default=300)
parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/')
parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/')
parser.add_argument('--model_name', type=str, default='DeblurGAN.model')
parser.add_argument('--summary_dir', type=str, default='./summaries/')
parser.add_argument('--data_name', type=str, default='GOPRO')
parser.add_argument('--tf_image_monitor', type=bool, default=False)
parser.add_argument('--resize_or_crop', type=str, default='resize')
parser.add_argument('--img_h', type=int, default=256)
parser.add_argument('--img_w', type=int, default=256)
parser.add_argument('--img_c', type=int, default=3)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
log_format = '[%(asctime)s %(levelname)s] %(message)s'
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level, format=log_format, stream=sys.stderr)
logging.getLogger("DeblurGAN_TRAIN.*").setLevel(level)
train(args)
|
[
"data.data_loader.read_data_path",
"os.mkdir",
"argparse.ArgumentParser",
"logging.basicConfig",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"os.system",
"os.path.exists",
"logging.info",
"data.data_loader.read_image_pair",
"models.cgan_model.cgan",
"logging.getLogger"
] |
[((251, 277), 'os.system', 'os.system', (['"""http_proxy_on"""'], {}), "('http_proxy_on')\n", (260, 277), False, 'import os\n'), ((717, 781), 'data.data_loader.read_data_path', 'loader.read_data_path', (['args.data_path_train'], {'name': 'args.data_name'}), '(args.data_path_train, name=args.data_name)\n', (738, 781), True, 'import data.data_loader as loader\n'), ((867, 879), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (877, 879), True, 'import tensorflow as tf\n'), ((892, 908), 'models.cgan_model.cgan', 'cgan', (['sess', 'args'], {}), '(sess, args)\n', (896, 908), False, 'from models.cgan_model import cgan\n'), ((2380, 2412), 'logging.info', 'logging.info', (['"""[!] test started"""'], {}), "('[!] test started')\n", (2392, 2412), False, 'import logging\n'), ((2428, 2491), 'data.data_loader.read_data_path', 'loader.read_data_path', (['args.data_path_test'], {'name': 'args.data_name'}), '(args.data_path_test, name=args.data_name)\n', (2449, 2491), True, 'import data.data_loader as loader\n'), ((3216, 3245), 'logging.info', 'logging.info', (['"""[*] test done"""'], {}), "('[*] test done')\n", (3228, 3245), False, 'import logging\n'), ((3307, 3346), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (3330, 3346), False, 'import argparse\n'), ((4627, 4697), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level', 'format': 'log_format', 'stream': 'sys.stderr'}), '(level=level, format=log_format, stream=sys.stderr)\n', (4646, 4697), False, 'import logging\n'), ((952, 985), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (983, 985), True, 'import tensorflow as tf\n'), ((2651, 2757), 'data.data_loader.read_image_pair', 'loader.read_image_pair', (['data'], {'resize_or_crop': 'args.resize_or_crop', 'image_size': '(args.img_h, args.img_w)'}), '(data, resize_or_crop=args.resize_or_crop, image_size\n =(args.img_h, args.img_w))\n', (2673, 2757), True, 'import data.data_loader as loader\n'), ((1199, 1305), 'data.data_loader.read_image_pair', 'loader.read_image_pair', (['data'], {'resize_or_crop': 'args.resize_or_crop', 'image_size': '(args.img_h, args.img_w)'}), '(data, resize_or_crop=args.resize_or_crop, image_size\n =(args.img_h, args.img_w))\n', (1221, 1305), True, 'import data.data_loader as loader\n'), ((1688, 1833), 'logging.info', 'logging.info', (['"""%d epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f"""', 'iter', 'i', 'loss_G', 'adv_loss', 'perceptual_loss'], {}), "(\n '%d epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f'\n , iter, i, loss_G, adv_loss, perceptual_loss)\n", (1700, 1833), False, 'import logging\n'), ((2093, 2231), 'logging.info', 'logging.info', (['"""%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f"""', 'iter', 'i', 'loss_D', 'loss_disc', 'loss_gp'], {}), "(\n '%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f'\n , iter, i, loss_D, loss_disc, loss_gp)\n", (2105, 2231), False, 'import logging\n'), ((2551, 2582), 'os.path.exists', 'os.path.exists', (['"""./test_result"""'], {}), "('./test_result')\n", (2565, 2582), False, 'import os\n'), ((2596, 2621), 'os.mkdir', 'os.mkdir', (['"""./test_result"""'], {}), "('./test_result')\n", (2604, 2621), False, 'import os\n'), ((4702, 4740), 'logging.getLogger', 'logging.getLogger', (['"""DeblurGAN_TRAIN.*"""'], {}), "('DeblurGAN_TRAIN.*')\n", (4719, 4740), False, 'import logging\n')]
|
import os
import sys
import json
import click
import datetime
from distutils.version import StrictVersion
from jinja2 import Template
ROOTDIR = os.getcwd()
INITIAL_VERSION = '0.0.0'
DEFAULT_TEMPLATE = """# Changelog
Note: version releases in the 0.x.y range may introduce breaking changes.
{% for release in releases %}
## {{ release.id }}
{% for data in release.data %}
- {{ data.type }}: {{ data.description }}
{% endfor %}
{% endfor %}
"""
class Semversioner:
def __init__(self, path=ROOTDIR):
semversioner_path_legacy = os.path.join(path, '.changes')
semversioner_path_new = os.path.join(path, '.semversioner')
semversioner_path = semversioner_path_new
deprecated = False
if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new):
deprecated = True
semversioner_path = semversioner_path_legacy
if not os.path.isdir(semversioner_path):
os.makedirs(semversioner_path)
next_release_path = os.path.join(semversioner_path, 'next-release')
if not os.path.isdir(next_release_path):
os.makedirs(next_release_path)
self.path = path
self.semversioner_path = semversioner_path
self.next_release_path = next_release_path
self.deprecated = deprecated
def is_deprecated(self):
return self.deprecated
def add_change(self, change_type, description):
"""
Create a new changeset file.
The method creates a new json file in the ``.semversioner/next-release/`` directory
with the type and description provided.
Parameters
-------
change_type (str): Change type. Allowed values: major, minor, patch.
description (str): Change description.
Returns
-------
path : str
Absolute path of the file generated.
"""
parsed_values = {
'type': change_type,
'description': description,
}
filename = None
while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))):
filename = '{type_name}-{datetime}.json'.format(
type_name=parsed_values['type'],
datetime="{:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow()))
with open(os.path.join(self.next_release_path, filename), 'w') as f:
f.write(json.dumps(parsed_values, indent=2) + "\n")
return {
'path': os.path.join(self.next_release_path, filename)
}
def generate_changelog(self):
"""
Generates the changelog.
The method generates the changelog based on the template file defined
in ``DEFAULT_TEMPLATE``.
Returns
-------
str
Changelog string.
"""
releases = []
for release_identifier in self._sorted_releases():
with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f:
data = json.load(f)
data = sorted(data, key=lambda k: k['type'] + k['description'])
releases.append({'id': release_identifier, 'data': data})
return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases)
def release(self):
"""
Performs the release.
The method performs the release by taking everything in ``next-release`` folder
and aggregating all together in a single JSON file for that release (e.g ``1.12.0.json``).
The JSON file generated is a list of all the individual JSON files from ``next-release``.
After aggregating the files, it removes the ``next-release`` directory.
Returns
-------
previous_version : str
Previous version.
new_version : str
New version.
"""
changes = []
next_release_dir = self.next_release_path
for filename in os.listdir(next_release_dir):
full_path = os.path.join(next_release_dir, filename)
with open(full_path) as f:
changes.append(json.load(f))
if len(changes) == 0:
click.secho("Error: No changes to release. Skipping release process.", fg='red')
sys.exit(-1)
current_version_number = self.get_version()
next_version_number = self._get_next_version_number(changes, current_version_number)
click.echo("Releasing version: %s -> %s" % (current_version_number, next_version_number))
release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number)
click.echo("Generated '" + release_json_filename + "' file.")
with open(release_json_filename, 'w') as f:
f.write(json.dumps(changes, indent=2, sort_keys=True))
click.echo("Removing '" + next_release_dir + "' directory.")
for filename in os.listdir(next_release_dir):
full_path = os.path.join(next_release_dir, filename)
os.remove(full_path)
os.rmdir(next_release_dir)
return {
'previous_version': current_version_number,
'new_version': next_version_number
}
def get_version(self):
"""
Gets the current version.
"""
releases = self._sorted_releases()
if len(releases) > 0:
return releases[0]
return INITIAL_VERSION
def _sorted_releases(self):
files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))]
releases = list(map(lambda x: x[:-len('.json')], files))
releases = sorted(releases, key=StrictVersion, reverse=True)
return releases
def _get_next_version_number(self, changes, current_version_number):
release_type = sorted(list(map(lambda x: x['type'], changes)))[0]
return self._increase_version(current_version_number, release_type)
def _increase_version(self, current_version, release_type):
"""
Returns a string like '1.0.0'.
"""
# Convert to a list of ints: [1, 0, 0].
version_parts = list(int(i) for i in current_version.split('.'))
if release_type == 'patch':
version_parts[2] += 1
elif release_type == 'minor':
version_parts[1] += 1
version_parts[2] = 0
elif release_type == 'major':
version_parts[0] += 1
version_parts[1] = 0
version_parts[2] = 0
return '.'.join(str(i) for i in version_parts)
|
[
"jinja2.Template",
"os.remove",
"json.load",
"os.makedirs",
"os.getcwd",
"os.path.isdir",
"click.echo",
"json.dumps",
"datetime.datetime.utcnow",
"os.rmdir",
"click.secho",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((145, 156), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (154, 156), False, 'import os\n'), ((542, 572), 'os.path.join', 'os.path.join', (['path', '""".changes"""'], {}), "(path, '.changes')\n", (554, 572), False, 'import os\n'), ((605, 640), 'os.path.join', 'os.path.join', (['path', '""".semversioner"""'], {}), "(path, '.semversioner')\n", (617, 640), False, 'import os\n'), ((1024, 1071), 'os.path.join', 'os.path.join', (['semversioner_path', '"""next-release"""'], {}), "(semversioner_path, 'next-release')\n", (1036, 1071), False, 'import os\n'), ((3983, 4011), 'os.listdir', 'os.listdir', (['next_release_dir'], {}), '(next_release_dir)\n', (3993, 4011), False, 'import os\n'), ((4465, 4558), 'click.echo', 'click.echo', (["('Releasing version: %s -> %s' % (current_version_number, next_version_number))"], {}), "('Releasing version: %s -> %s' % (current_version_number,\n next_version_number))\n", (4475, 4558), False, 'import click\n'), ((4588, 4657), 'os.path.join', 'os.path.join', (['self.semversioner_path', "('%s.json' % next_version_number)"], {}), "(self.semversioner_path, '%s.json' % next_version_number)\n", (4600, 4657), False, 'import os\n'), ((4667, 4728), 'click.echo', 'click.echo', (['("Generated \'" + release_json_filename + "\' file.")'], {}), '("Generated \'" + release_json_filename + "\' file.")\n', (4677, 4728), False, 'import click\n'), ((4857, 4917), 'click.echo', 'click.echo', (['("Removing \'" + next_release_dir + "\' directory.")'], {}), '("Removing \'" + next_release_dir + "\' directory.")\n', (4867, 4917), False, 'import click\n'), ((4942, 4970), 'os.listdir', 'os.listdir', (['next_release_dir'], {}), '(next_release_dir)\n', (4952, 4970), False, 'import os\n'), ((5078, 5104), 'os.rmdir', 'os.rmdir', (['next_release_dir'], {}), '(next_release_dir)\n', (5086, 5104), False, 'import os\n'), ((730, 769), 'os.path.isdir', 'os.path.isdir', (['semversioner_path_legacy'], {}), '(semversioner_path_legacy)\n', (743, 769), False, 'import os\n'), ((918, 950), 'os.path.isdir', 'os.path.isdir', (['semversioner_path'], {}), '(semversioner_path)\n', (931, 950), False, 'import os\n'), ((964, 994), 'os.makedirs', 'os.makedirs', (['semversioner_path'], {}), '(semversioner_path)\n', (975, 994), False, 'import os\n'), ((1087, 1119), 'os.path.isdir', 'os.path.isdir', (['next_release_path'], {}), '(next_release_path)\n', (1100, 1119), False, 'import os\n'), ((1133, 1163), 'os.makedirs', 'os.makedirs', (['next_release_path'], {}), '(next_release_path)\n', (1144, 1163), False, 'import os\n'), ((2513, 2559), 'os.path.join', 'os.path.join', (['self.next_release_path', 'filename'], {}), '(self.next_release_path, filename)\n', (2525, 2559), False, 'import os\n'), ((4037, 4077), 'os.path.join', 'os.path.join', (['next_release_dir', 'filename'], {}), '(next_release_dir, filename)\n', (4049, 4077), False, 'import os\n'), ((4205, 4290), 'click.secho', 'click.secho', (['"""Error: No changes to release. Skipping release process."""'], {'fg': '"""red"""'}), "('Error: No changes to release. Skipping release process.', fg='red'\n    )\n", (4216, 4290), False, 'import click\n'), ((4298, 4310), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (4306, 4310), False, 'import sys\n'), ((4996, 5036), 'os.path.join', 'os.path.join', (['next_release_dir', 'filename'], {}), '(next_release_dir, filename)\n', (5008, 5036), False, 'import os\n'), ((5049, 5069), 'os.remove', 'os.remove', (['full_path'], {}), '(full_path)\n', (5058, 5069), False, 'import os\n'), ((778, 814), 'os.path.isdir', 'os.path.isdir', (['semversioner_path_new'], {}), '(semversioner_path_new)\n', (791, 814), False, 'import os\n'), ((2093, 2139), 'os.path.join', 'os.path.join', (['self.next_release_path', 'filename'], {}), '(self.next_release_path, filename)\n', (2105, 2139), False, 'import os\n'), ((2351, 2397), 'os.path.join', 'os.path.join', (['self.next_release_path', 'filename'], {}), '(self.next_release_path, filename)\n', (2363, 2397), False, 'import os\n'), ((3051, 3063), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3060, 3063), False, 'import json\n'), ((3225, 3269), 'jinja2.Template', 'Template', (['DEFAULT_TEMPLATE'], {'trim_blocks': '(True)'}), '(DEFAULT_TEMPLATE, trim_blocks=True)\n', (3233, 3269), False, 'from jinja2 import Template\n'), ((4801, 4846), 'json.dumps', 'json.dumps', (['changes'], {'indent': '(2)', 'sort_keys': '(True)'}), '(changes, indent=2, sort_keys=True)\n', (4811, 4846), False, 'import json\n'), ((5520, 5554), 'os.listdir', 'os.listdir', (['self.semversioner_path'], {}), '(self.semversioner_path)\n', (5530, 5554), False, 'import os\n'), ((2430, 2465), 'json.dumps', 'json.dumps', (['parsed_values'], {'indent': '(2)'}), '(parsed_values, indent=2)\n', (2440, 2465), False, 'import json\n'), ((2954, 3020), 'os.path.join', 'os.path.join', (['self.semversioner_path', "(release_identifier + '.json')"], {}), "(self.semversioner_path, release_identifier + '.json')\n", (2966, 3020), False, 'import os\n'), ((4148, 4160), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4157, 4160), False, 'import json\n'), ((5573, 5612), 'os.path.join', 'os.path.join', (['self.semversioner_path', 'f'], {}), '(self.semversioner_path, f)\n', (5585, 5612), False, 'import os\n'), ((2303, 2329), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2327, 2329), False, 'import datetime\n')]
|
import requests as rq
from sys import stdout
from pathlib import Path
import re
import os
class Downloader:
"""
class to manage downloading url links
"""
def __init__(self, *args, session=None): # creates a session
self.cwd = Path.cwd()
self.src_path = Path(__file__)
if not session:
self.session = rq.Session()
else:
self.session = session
def _print_progress(self, current_bytes, size):
bar = self._get_bar(current_bytes / size)
output = f'\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB'
stdout.write(output)
# stdout.flush()
def _get_bar(self, progress):
"""
progress must be between 0 and 1\n
Returns the bar with current progress as a string
"""
FULL_BLOCKLENGTH = 32
fillblock = '█'
if progress > 1:
progress = 1
blocks = int(progress / (1/FULL_BLOCKLENGTH))
bar_start = fillblock*blocks
bar_end = (33 - len(bar_start))*'_'+'|'
bar_percent = f' {progress*100:0.2f} % '
text = bar_start+bar_end+bar_percent
return text
def _make_name(self, url_path: Path, name_in: str):
"""
Parses the name and returns a writebale name
"""
# in case its a number and not None
if name_in and name_in != type(str):
name_in = str(name_in)
try:
name_in[0] # if its empty it raises exception
# clean_name = re.search(r'\w+',name_in).group() # parsing name, only alphanumeric, no whitespace
# name = re.split(r'[.].+$',name_in)[0] # name without extension
name_parts = name_in.split('.') # name without extension
if len(name_parts) > 1:
name_noext = '.'.join(name_parts[:-1]) # joining together without extension
else:
name_noext = name_parts[0]
clean_name = ' '.join(re.findall(r'\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace
clean_name[0] # empty testing
except :
print('invalid name, taking name from url')
name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it
return name
try:
extension = re.search(r'(?<=[.])\w+$', name_in).group() # matching only extension after last "."
# extension = name.split('.')[-1] # matching only extension after last "."
except:
extension = None
if extension:
name_path = Path(f'{clean_name}.{extension}') # custom extension specified and not in the name
else:
name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it
extension = re.search(r'(?<=[.])\w+$', name).group() # matching only extension after last "."
name_path = Path(f'{clean_name}.{extension}') # extension from url
return name_path.name
def download(self, url, d_path=None, name_out=None, printprogess=False):
"""
Downloads from url
`d_path`: Default download path is current working directory.
`name_out`: Default name is the tail of the url address,
can take in a name with or without extension,
takes extension from url if not specified.
`printprogress`: Prints current download progress in terminal.
"""
url_path = Path(url)
#download_path = self.cwd / url_path.name if not d_path else Path(d_path)
name_out = self._make_name(url_path, name_out)
if not d_path:
# download_path = self.src_path.parent
download_path = self.cwd
else:
download_path = Path(d_path)
# os.chdir(download_path)
# making file path
save_file = download_path / name_out
# checking if file already is there
if save_file.exists():
print('skipping', save_file.name)
return
r = self.session.get(url)
# size = float(r.headers['content-length'])
contentlength = r.headers.get('content-length')
if contentlength is not None:
size = float(contentlength)
else:
size = 1
with open(save_file, 'wb') as fd:
tmp = 0
print(f'Downloding: {save_file.name}')
print(f'to {save_file.absolute()}')
for chunk in r.iter_content(chunk_size=1024):
if chunk:
fd.write(chunk)
tmp += 1024
if printprogess:
self._print_progress(tmp, size)
print('')
print('Done')
def input_loop():
while True:
inp = input('Download path:\n')
if _test_write(inp): return inp
#try:
# d_path = Path(inp)
#except Exception as e:
# print('invalid path, try again\n')
# continue
#if d_path.exists(): return d_path
def name_loop():
while True:
inp = input('Name:\n')
return inp
def _test_write(path):
''' writes a file to the path and returns True if it succeded '''
writable = False
try:
p = Path(path)
test_file = p / 'testfile.testfile'
with open(test_file, 'wb') as f:
f.write(bytes(0))
writable = True
except Exception as e:
print('write test failed: ',e)
return
finally:
try:
os.remove(test_file)
except Exception as e:
#print('deleting test write failed: ',e)
pass
return writable
if __name__ == "__main__":
# d_path = input_loop() #let user decide where to download
d_path = Path('/home/bruno/Desktop')
name = name_loop() # let user decide what name it will have
d = Downloader()
test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260'
d.download(test_image_url, d_path, name, printprogess=False)
|
[
"sys.stdout.write",
"os.remove",
"re.split",
"requests.Session",
"pathlib.Path",
"re.findall",
"pathlib.Path.cwd",
"re.search"
] |
[((5953, 5980), 'pathlib.Path', 'Path', (['"""/home/bruno/Desktop"""'], {}), "('/home/bruno/Desktop')\n", (5957, 5980), False, 'from pathlib import Path\n'), ((262, 272), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (270, 272), False, 'from pathlib import Path\n'), ((298, 312), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (302, 312), False, 'from pathlib import Path\n'), ((632, 652), 'sys.stdout.write', 'stdout.write', (['output'], {}), '(output)\n', (644, 652), False, 'from sys import stdout\n'), ((3557, 3566), 'pathlib.Path', 'Path', (['url'], {}), '(url)\n', (3561, 3566), False, 'from pathlib import Path\n'), ((5411, 5421), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (5415, 5421), False, 'from pathlib import Path\n'), ((366, 378), 'requests.Session', 'rq.Session', ([], {}), '()\n', (376, 378), True, 'import requests as rq\n'), ((2665, 2698), 'pathlib.Path', 'Path', (['f"""{clean_name}.{extension}"""'], {}), "(f'{clean_name}.{extension}')\n", (2669, 2698), False, 'from pathlib import Path\n'), ((2980, 3013), 'pathlib.Path', 'Path', (['f"""{clean_name}.{extension}"""'], {}), "(f'{clean_name}.{extension}')\n", (2984, 3013), False, 'from pathlib import Path\n'), ((3866, 3878), 'pathlib.Path', 'Path', (['d_path'], {}), '(d_path)\n', (3870, 3878), False, 'from pathlib import Path\n'), ((5694, 5714), 'os.remove', 'os.remove', (['test_file'], {}), '(test_file)\n', (5703, 5714), False, 'import os\n'), ((2048, 2080), 're.findall', 're.findall', (['"""\\\\w+.+"""', 'name_noext'], {}), "('\\\\w+.+', name_noext)\n", (2058, 2080), False, 'import re\n'), ((2783, 2813), 're.split', 're.split', (['"""[?]"""', 'url_path.name'], {}), "('[?]', url_path.name)\n", (2791, 2813), False, 'import re\n'), ((2268, 2298), 're.split', 're.split', (['"""[?]"""', 'url_path.name'], {}), "('[?]', url_path.name)\n", (2276, 2298), False, 'import re\n'), ((2397, 2432), 're.search', 're.search', (['"""(?<=[.])\\\\w+$"""', 'name_in'], {}), "('(?<=[.])\\\\w+$', name_in)\n", (2406, 2432), False, 'import re\n'), ((2873, 2905), 're.search', 're.search', (['"""(?<=[.])\\\\w+$"""', 'name'], {}), "('(?<=[.])\\\\w+$', name)\n", (2882, 2905), False, 'import re\n')]
|
import random
import chess
import chess.engine
class RandomPlayer:
def __init__(self):
self.id = {
'name': 'RandomPlayer'
}
def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult:
legal_moves = list(board.legal_moves)
move = random.choice(legal_moves)
return chess.engine.PlayResult(move=move, ponder=None)
def quit(self):
pass
|
[
"chess.engine.PlayResult",
"random.choice"
] |
[((300, 326), 'random.choice', 'random.choice', (['legal_moves'], {}), '(legal_moves)\n', (313, 326), False, 'import random\n'), ((343, 390), 'chess.engine.PlayResult', 'chess.engine.PlayResult', ([], {'move': 'move', 'ponder': 'None'}), '(move=move, ponder=None)\n', (366, 390), False, 'import chess\n')]
|
# dwmDistances
# Being written September 2019 by <NAME>
# Intended for use with DWM 1001 module through UART TLV interface
# This script calls the dwm_loc_get API call as specified in the
# DWM1001 Firmware API Guide 5.3.10.
# It parses the information received to send over
# the ROS network.
# In the future, this script will be expanded to allow
# position updates to be written to anchor nodes
# Currently limited to Python 3.6+. Use command line arguments
# to specify the name of the port (See myParser() function)
import serial # use "pip install pyserial" if you have not already done so
import time
import sys
import argparse
defaultPortName = '/dev/ttyACM0'
# On linux, you should use /dev/ttyACM0
# ON Windows, the port name may be 'COM9' or similar
def myParser():
# This function handles command lets the user specify the
# name of the port to use with a command line argument.
# --port=[name or number]
parser = argparse.ArgumentParser(description = 'get position info') # Script descript.
parser.add_argument(
'--port',
default=defaultPortName,
help='specify the name of the port to use (default: ' + defaultPortName + ' )'
)
args = parser.parse_args()
print("Using port:", args.port)
return args.port
ser = None # This will be the name of the handle to the serial port
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
# API Error codes
ERR_CODE_OK = bytes.fromhex("00")
# 1: unknown command or broken TLV frame
# 2: internal error
# 3: invalid parameter
# 4: busy
# 5: operation not permitted
# API Commands [Type, length]
DWM_POS_SET = bytes.fromhex("01 0d") # Used to set position. Follow with position as 13 bytes
DWM_POS_GET = bytes.fromhex("02 00") # Used to ask for position.
DWM_LOC_GET = bytes.fromhex("0c 00") # Request for position + distances to anchors/tags
# Response codes
TLV_TYPE_DUMMY = bytes.fromhex("00") # Reserved for SPI dummy byte
TLV_TYPE_POS_XYZ = bytes.fromhex("41") # Response position coordinates x,y,z with q
TLV_TYPE_RNG_AN_DIST = bytes.fromhex("48") # Response: Ranging anchor distances
TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex("49") # Response: Ranging anchor distances and positions
def main():
global ser
print("dwmPosGet started.")
myPort = myParser()
# Establish serial port connection
try:
ser = serial.Serial(myPort, baudrate=115200, timeout=None)
print(ser)
print("Connection established.")
except:
print("Error in trying to connect to serial port {}".format(myPort))
stopLoop = False
# Loop plan:
# 1. Ask Decawave for position
# 2. Receive response, parsing as I go
# --First response is confirmation / error code
# --Second response is position
# 2.5 Error handling
# 3. Output message
# ----------
while stopLoop is False:
getLocations()
def sendTLV(request):
global ser
txBuffer = request
try:
ser.reset_input_buffer() # Get rid of anything in the buffer that could confuse this script.
ser.write(txBuffer)
except:
print(f"Error during transmission of request {txBuffer.hex()}")
stopLoop = True
return EXIT_FAILURE
return EXIT_SUCCESS
def receiveTLV():
# Listen for TLV response from Decawave DWM1001 module
# Returns a list of [Type, Length, Value]
# If it receives TLV_TYPE_DUMMY, it keeps listening for next message
global ser # The handle for the serial port connection
typeTLV = TLV_TYPE_DUMMY
while (typeTLV == TLV_TYPE_DUMMY):
typeTLV = ser.read(1) # Read the "type" byte of the response
lengthTLV = ser.read(1) # Read the "length" byte of the response
lengthTLV = int.from_bytes(lengthTLV, byteorder='little')
valueTLV = ser.read(lengthTLV) # Read the value [error code].
return [typeTLV, lengthTLV, valueTLV]
def parsePOSvalue(value):
# This helper function takes a 13-byte position code and returns the
# x, y, z, and q values
x = int.from_bytes(value[0:4], byteorder='little')
y = int.from_bytes(value[4:8], byteorder='little')
z = int.from_bytes(value[8:12], byteorder='little')
q = int.from_bytes(value[12:13], byteorder='little')
return [x, y, z, q]
def parseTLV(typeTLV, length, value):
# TLV_TYPE_DUMMY = bytes.fromhex("00") # Reserved for SPI dummy byte
# TLV_TYPE_POS_XYZ = bytes.fromhex("41") # Response position coordinates x,y,z with q
# TLV_TYPE_RNG_AN_DIST = bytes.fromhex("48") # Response: Ranging anchor distances
# TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex("49") # Response: Ranging anchor distances and positions
if typeTLV == TLV_TYPE_POS_XYZ:
[x, y, z, q] = parsePOSvalue(value)
return [x, y, z, q]
if typeTLV == TLV_TYPE_RNG_AN_DIST:
# This code may be received from an anchor node
num_distances = int.from_bytes(value[0:1])
distances = []
for i in range (num_distances):
offset = i*13+1
addr = value[offset:offset+8].hex() # Note: Address size is 8 bytes here, not 2 bytes
d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little')
dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little')
distances.append([addr, d, dq])
return [num_distances, distances]
if typeTLV == TLV_TYPE_RNG_AN_POS_DIST:
num_distances = int.from_bytes(value[0:1], byteorder = 'little')
distances = []
for i in range(num_distances):
offset = i*13+1
addr = value[offset:offset+2].hex() # UWB address
d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance
dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality
[x,y,z,q] = parsePOSvalue(value[offset+7:offset+20])
distances.append([addr, d, dq, x, y, z, q])
return [num_distances, distances]
# Default case:
print("Error: attempted to parse TLV of type not yet supported.")
return EXIT_FAILURE
def printTLV(typeTLV, length, value):
if typeTLV == TLV_TYPE_POS_XYZ:
print( "{:_<15} {:_<15} {:_<15} {:_<5}".format('x','y','z','q'))
[x,y,z,q] = parseTLV(typeTLV, length, value)
print("{:<15} {:<15} {:<15} {:<5}".format(x,y,z,q))
if typeTLV == TLV_TYPE_RNG_AN_POS_DIST:
print("{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q'))
[num_distances, distances] = parseTLV(typeTLV, length, value)
for i in range(num_distances):
[addr, d, dq, x, y, z, q] = distances[i]
print("{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}".format(addr, d, dq, x, y, z, q))
if typeTLV == TLV_TYPE_RNG_AN_DIST:
print("{:=<5} {:=<15} {:=<5}".format('addr','d','dq'))
[num_distances, distances] = parseTLV(typeTLV, length, value)
for i in range(num_distances):
[addr, d, dq] = distances[i]
print("{:<5} {:<15} {:<5}".format(addr, d, dq))
def getLocations():
# 1. Ask Decawave for Position and distances
temp = sendTLV(DWM_LOC_GET)
if temp == EXIT_FAILURE:
return EXIT_FAILURE
# -------------
# 2. Receive response. May get dummy bytes before real response.
[typeTLV, length, value]= receiveTLV()
if value != ERR_CODE_OK:
print("Received an error message. Flushing input buffer.")
print(value)
ser.reset_input_buffer()
return EXIT_FAILURE
# ---------Now, I read until I get the position
[typeTLV, length, value] = receiveTLV() # Expect Position
if length < 13:
print("No position received. Flushing buffer.")
ser.reset_input_buffer()
return EXIT_FAILURE
else:
printTLV(typeTLV, length, value)
[typeTLV, length, value] = receiveTLV() # Expect Distances
if length < 13:
print("No distances received")
else:
printTLV(typeTLV, length, value)
# The following lines allow this script to run as a program if called directly.
if __name__ == "__main__":
main()
|
[
"serial.Serial",
"argparse.ArgumentParser"
] |
[((933, 989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""get position info"""'}), "(description='get position info')\n", (956, 989), False, 'import argparse\n'), ((2274, 2326), 'serial.Serial', 'serial.Serial', (['myPort'], {'baudrate': '(115200)', 'timeout': 'None'}), '(myPort, baudrate=115200, timeout=None)\n', (2287, 2326), False, 'import serial\n')]
|
import logging
from abc import ABC
class LoggingBase(ABC):
def __init__(self, log_level: int) -> None:
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(log_level)
@property
def log_level(self) -> int:
self.logger.level
|
[
"logging.getLogger"
] |
[((137, 179), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (154, 179), False, 'import logging\n')]
|
from tracking import generate_new_tracking_key, register_event
from django.core.urlresolvers import reverse
from django.conf import settings
USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, "USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE", False)
class UserTrackingMiddleware(object):
def process_request(self, request):
#request.session["test"] = "good"
return None
def process_response(self, request, response):
"""
Only record when we return HTML pages. Set a cookie if not set
"""
if 'text/html' in response.get('Content-Type', ''):
content = getattr(response, 'content', '')
if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find("<body") >= 0:
url_request = request.path
urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')]
found = False
for url in urls:
if url_request.find(url) >= 0:
found = True
break
if not found:
tracking_id = None
event_data = {'url': request.path_info, 'method': request.method}
if 'user_tracking_id' not in request.COOKIES:
tracking_id = generate_new_tracking_key()
response.set_cookie('user_tracking_id', tracking_id)
register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request)
#set javascript callback behavior to check if the user has disabled cookies
response.set_cookie('user_tracking_verify', tracking_id)
else:
tracking_id = request.COOKIES['user_tracking_id']
register_event(tracking_id=tracking_id, event_name='server_middleware_page_view',event_data=event_data, request=request)
return response
|
[
"tracking.register_event",
"django.core.urlresolvers.reverse",
"tracking.generate_new_tracking_key"
] |
[((813, 852), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_tracking_register_event"""'], {}), "('user_tracking_register_event')\n", (820, 852), False, 'from django.core.urlresolvers import reverse\n'), ((854, 885), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_tracking_verify"""'], {}), "('user_tracking_verify')\n", (861, 885), False, 'from django.core.urlresolvers import reverse\n'), ((1867, 1993), 'tracking.register_event', 'register_event', ([], {'tracking_id': 'tracking_id', 'event_name': '"""server_middleware_page_view"""', 'event_data': 'event_data', 'request': 'request'}), "(tracking_id=tracking_id, event_name=\n 'server_middleware_page_view', event_data=event_data, request=request)\n", (1881, 1993), False, 'from tracking import generate_new_tracking_key, register_event\n'), ((1332, 1359), 'tracking.generate_new_tracking_key', 'generate_new_tracking_key', ([], {}), '()\n', (1357, 1359), False, 'from tracking import generate_new_tracking_key, register_event\n'), ((1463, 1567), 'tracking.register_event', 'register_event', ([], {'tracking_id': 'tracking_id', 'event_name': '"""server_middleware_set_cookie"""', 'request': 'request'}), "(tracking_id=tracking_id, event_name=\n 'server_middleware_set_cookie', request=request)\n", (1477, 1567), False, 'from tracking import generate_new_tracking_key, register_event\n')]
|
#!/usr/bin/python
import re
import subprocess
class workspaces():
@staticmethod
def _cmd(*args):
return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
@staticmethod
def get_display_size():
size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace("\n", "")))[8].split('x')
return {"x": int(size[0]), "y": int(size[1])}
@staticmethod
def get_workspace_count():
total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace("\n", "")))[3].split('x')
total_size = [int(x) for x in total_size]
display = workspaces.get_display_size()
return {"x": int(total_size[0]/display['x']), "y": int(total_size[1]/display['y'])}
@staticmethod
def _workspace_coords_to_screen_coords(x, y):
disp_size = workspaces.get_display_size()
workspace_size = workspaces.get_workspace_count()
x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1 - x)
y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1- y)
return {"x": x_coord, "y": y_coord}
@staticmethod
def move_window(id, desk_x, desk_y):
coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y)
subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) + ',' + str(coords['y']) + ',-1,-1'])
@staticmethod
def get_windows():
windows = workspaces._cmd('wmctrl', '-l').split("\n")
lines = [re.split(' *', desc, 3) for desc in windows]
return [dict(zip(['id', 'desktop', 'machine', 'title'], line)) for line in lines]
|
[
"subprocess.Popen",
"re.split"
] |
[((1480, 1503), 're.split', 're.split', (['""" *"""', 'desc', '(3)'], {}), "(' *', desc, 3)\n", (1488, 1503), False, 'import re\n'), ((121, 167), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE)\n', (137, 167), False, 'import subprocess\n')]
|
import configargparse
def parse_args() -> dict:
parser = configargparse.ArgParser(default_config_files=['config.ini'])
parser.add_argument('--madmin_url', required=True, type=str)
parser.add_argument('--madmin_user', required=False, default='', type=str)
parser.add_argument('--madmin_password', required=False, default='', type=str)
args, unknown = parser.parse_known_args()
return {'madmin_url': args.madmin_url.rstrip('/'),
'madmin_user': args.madmin_user.strip(),
'madmin_password': args.madmin_password.strip()}
|
[
"configargparse.ArgParser"
] |
[((63, 124), 'configargparse.ArgParser', 'configargparse.ArgParser', ([], {'default_config_files': "['config.ini']"}), "(default_config_files=['config.ini'])\n", (87, 124), False, 'import configargparse\n')]
|
# -*- coding: utf-8 -*-
## ---------------------------------------------------------------------------
## Copyright 2019 Dynatrace LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## ---------------------------------------------------------------------------
"""Unit tests for the DAG class."""
from collections import defaultdict
import os
import pathlib
import pytest
from alyeska.compose import Task, DAG
from alyeska.compose.exceptions import CyclicGraphError
from test_compose_globals import (
COMPOSE_SMALL,
COMPOSE_BIG,
COMPOSE_CYCLE,
COMPOSE_TRICKY,
)
# ----------------------------------------------------------------------------
# Helper Functions
# ----------------------------------------------------------------------------
def get_two_tasks():
return (Task("A.py", env="test-env"), Task("B.py", env="test-env"))
# ----------------------------------------------------------------------------
# DAG magic methods
# ----------------------------------------------------------------------------
def test__validate_dependency():
make_tea = Task("make_tea.py", "test-env")
drink_tea = Task("drink_tea.py", "test-env")
with pytest.raises(TypeError):
DAG.validate_dependency([1, 2, 3])
with pytest.raises(ValueError):
DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]}))
with pytest.raises(ValueError):
DAG.validate_dependency({Task: {1, 2, 3}})
DAG.validate_dependency({make_tea: drink_tea})
DAG.validate_dependency({make_tea: {drink_tea, drink_tea}})
def test__DAG_init():
DAG()
# init with dependencies
make_tea = Task("make_tea.py", "test-env")
drink_tea = Task("drink_tea.py", "test-env")
dag = DAG(tasks=make_tea)
assert len(dag.tasks) == 1
dag = DAG(tasks={drink_tea, make_tea})
assert len(dag.tasks) == 2
dag = DAG(upstream_dependencies={drink_tea: make_tea})
assert len(dag.tasks) == 2
dag = DAG(downstream_dependencies={make_tea: drink_tea})
assert len(dag.tasks) == 2
def test__DAG_repr():
p = pathlib.Path("make_tea.py")
make_tea = Task(p, "test-env")
dag = DAG()
dag.add_task(make_tea)
assert repr(dag) == "".join(["DAG({Task(", p.resolve().as_posix(), ")})"])
# ----------------------------------------------------------------------------
# DAG.tasks
# ----------------------------------------------------------------------------
def test__DAG_add_task():
A, B = get_two_tasks()
dag = DAG()
dag.add_task(A)
assert dag.tasks == {A}, "Test Task was not added to the DAG"
def test__DAG_add_tasks():
A, B = get_two_tasks()
C = Task("C.py")
dag = DAG()
dag.add_tasks({A, B})
assert dag.tasks == {A, B}, "Test Tasks were not added to the DAG"
dag.add_tasks(C)
assert dag.tasks == {A, B, C}
def test__DAG_remove_task():
A, B = get_two_tasks()
dag = DAG()
dag.add_tasks({A, B})
dag.remove_task(A)
assert dag.tasks == {B}
def test__DAG_remove_tasks():
A, B = get_two_tasks()
C = Task("C.py")
dag = DAG()
dag.add_tasks({A, B, C})
dag.remove_tasks({A, B})
assert dag.tasks == {C}
dag.remove_tasks(C)
assert dag.tasks == set()
# ----------------------------------------------------------------------------
# add dependencies
# ----------------------------------------------------------------------------
def test__DAG_add_dependency():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, A)
assert dag._edges[A] == set([B])
def test__DAG_add_dependency_detect_cycle():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, A)
with pytest.raises(CyclicGraphError):
dag.add_dependency(A, B)
def test__DAG_add_dependencies():
A, B = get_two_tasks()
C = Task("C.py", env="test-env")
dag = DAG()
dag.add_dependencies({B: A})
assert dag._edges[A] == set([B])
dag = DAG()
dag.add_dependencies({C: {A, B}})
assert dag._edges[A] == set([C])
assert dag._edges[B] == set([C])
def test__DAG_add_dependency_detect_cycle2():
A, B = get_two_tasks()
C = Task("C.py", env="test-env")
dag = DAG()
with pytest.raises(CyclicGraphError):
dag.add_dependencies({A: C, B: A, C: B})
# ----------------------------------------------------------------------------
# methods
# ----------------------------------------------------------------------------
def test__DAG_get_downstream():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_downstream() is not None
assert dag.get_downstream()[A] == {B}
assert dag.get_downstream() == {A: {B}}, "Task B is not downstream"
def test__DAG_get_upstream():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_upstream() is not None
assert dag.get_upstream()[B] == {A}
assert dag.get_upstream() == {B: {A}}, "Task A is not upstream"
def test__DAG_get_sources():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_sources() is not None
assert dag.get_sources() == {A}
def test__DAG_get_sinks():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_sinks() is not None
assert dag.get_sinks() == {B}
def test__DAG_is_cyclic():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert not dag.is_cyclic(), "acyclic graph idenfied as cyclic"
with pytest.raises(CyclicGraphError):
dag.add_dependency(A, depends_on=B)
def test__DAG_from_yaml():
DAG.from_yaml(COMPOSE_SMALL)
with pytest.raises(CyclicGraphError):
DAG.from_yaml(COMPOSE_CYCLE)
dag = DAG.from_yaml(COMPOSE_TRICKY)
assert len(dag.tasks) > 0
|
[
"alyeska.compose.DAG",
"alyeska.compose.DAG.validate_dependency",
"alyeska.compose.Task",
"collections.defaultdict",
"pathlib.Path",
"pytest.raises",
"alyeska.compose.DAG.from_yaml"
] |
[((1596, 1627), 'alyeska.compose.Task', 'Task', (['"""make_tea.py"""', '"""test-env"""'], {}), "('make_tea.py', 'test-env')\n", (1600, 1627), False, 'from alyeska.compose import Task, DAG\n'), ((1644, 1676), 'alyeska.compose.Task', 'Task', (['"""drink_tea.py"""', '"""test-env"""'], {}), "('drink_tea.py', 'test-env')\n", (1648, 1676), False, 'from alyeska.compose import Task, DAG\n'), ((1961, 2007), 'alyeska.compose.DAG.validate_dependency', 'DAG.validate_dependency', (['{make_tea: drink_tea}'], {}), '({make_tea: drink_tea})\n', (1984, 2007), False, 'from alyeska.compose import Task, DAG\n'), ((2012, 2071), 'alyeska.compose.DAG.validate_dependency', 'DAG.validate_dependency', (['{make_tea: {drink_tea, drink_tea}}'], {}), '({make_tea: {drink_tea, drink_tea}})\n', (2035, 2071), False, 'from alyeska.compose import Task, DAG\n'), ((2100, 2105), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (2103, 2105), False, 'from alyeska.compose import Task, DAG\n'), ((2151, 2182), 'alyeska.compose.Task', 'Task', (['"""make_tea.py"""', '"""test-env"""'], {}), "('make_tea.py', 'test-env')\n", (2155, 2182), False, 'from alyeska.compose import Task, DAG\n'), ((2199, 2231), 'alyeska.compose.Task', 'Task', (['"""drink_tea.py"""', '"""test-env"""'], {}), "('drink_tea.py', 'test-env')\n", (2203, 2231), False, 'from alyeska.compose import Task, DAG\n'), ((2243, 2262), 'alyeska.compose.DAG', 'DAG', ([], {'tasks': 'make_tea'}), '(tasks=make_tea)\n', (2246, 2262), False, 'from alyeska.compose import Task, DAG\n'), ((2305, 2337), 'alyeska.compose.DAG', 'DAG', ([], {'tasks': '{drink_tea, make_tea}'}), '(tasks={drink_tea, make_tea})\n', (2308, 2337), False, 'from alyeska.compose import Task, DAG\n'), ((2380, 2428), 'alyeska.compose.DAG', 'DAG', ([], {'upstream_dependencies': '{drink_tea: make_tea}'}), '(upstream_dependencies={drink_tea: make_tea})\n', (2383, 2428), False, 'from alyeska.compose import Task, DAG\n'), ((2471, 2521), 'alyeska.compose.DAG', 'DAG', ([], {'downstream_dependencies': '{make_tea: drink_tea}'}), '(downstream_dependencies={make_tea: drink_tea})\n', (2474, 2521), False, 'from alyeska.compose import Task, DAG\n'), ((2585, 2612), 'pathlib.Path', 'pathlib.Path', (['"""make_tea.py"""'], {}), "('make_tea.py')\n", (2597, 2612), False, 'import pathlib\n'), ((2628, 2647), 'alyeska.compose.Task', 'Task', (['p', '"""test-env"""'], {}), "(p, 'test-env')\n", (2632, 2647), False, 'from alyeska.compose import Task, DAG\n'), ((2658, 2663), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (2661, 2663), False, 'from alyeska.compose import Task, DAG\n'), ((3006, 3011), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (3009, 3011), False, 'from alyeska.compose import Task, DAG\n'), ((3163, 3175), 'alyeska.compose.Task', 'Task', (['"""C.py"""'], {}), "('C.py')\n", (3167, 3175), False, 'from alyeska.compose import Task, DAG\n'), ((3186, 3191), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (3189, 3191), False, 'from alyeska.compose import Task, DAG\n'), ((3416, 3421), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (3419, 3421), False, 'from alyeska.compose import Task, DAG\n'), ((3567, 3579), 'alyeska.compose.Task', 'Task', (['"""C.py"""'], {}), "('C.py')\n", (3571, 3579), False, 'from alyeska.compose import Task, DAG\n'), ((3590, 3595), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (3593, 3595), False, 'from alyeska.compose import Task, DAG\n'), ((3985, 3990), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (3988, 3990), False, 'from alyeska.compose import Task, DAG\n'), ((4141, 4146), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (4144, 4146), False, 'from alyeska.compose import Task, DAG\n'), ((4322, 4350), 'alyeska.compose.Task', 'Task', (['"""C.py"""'], {'env': '"""test-env"""'}), "('C.py', env='test-env')\n", (4326, 4350), False, 'from alyeska.compose import Task, DAG\n'), ((4361, 4366), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (4364, 4366), False, 'from alyeska.compose import Task, DAG\n'), ((4448, 4453), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (4451, 4453), False, 'from alyeska.compose import Task, DAG\n'), ((4649, 4677), 'alyeska.compose.Task', 'Task', (['"""C.py"""'], {'env': '"""test-env"""'}), "('C.py', env='test-env')\n", (4653, 4677), False, 'from alyeska.compose import Task, DAG\n'), ((4689, 4694), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (4692, 4694), False, 'from alyeska.compose import Task, DAG\n'), ((5025, 5030), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (5028, 5030), False, 'from alyeska.compose import Task, DAG\n'), ((5298, 5303), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (5301, 5303), False, 'from alyeska.compose import Task, DAG\n'), ((5562, 5567), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (5565, 5567), False, 'from alyeska.compose import Task, DAG\n'), ((5751, 5756), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (5754, 5756), False, 'from alyeska.compose import Task, DAG\n'), ((5936, 5941), 'alyeska.compose.DAG', 'DAG', ([], {}), '()\n', (5939, 5941), False, 'from alyeska.compose import Task, DAG\n'), ((6170, 6198), 'alyeska.compose.DAG.from_yaml', 'DAG.from_yaml', (['COMPOSE_SMALL'], {}), '(COMPOSE_SMALL)\n', (6183, 6198), False, 'from alyeska.compose import Task, DAG\n'), ((6290, 6319), 'alyeska.compose.DAG.from_yaml', 'DAG.from_yaml', (['COMPOSE_TRICKY'], {}), '(COMPOSE_TRICKY)\n', (6303, 6319), False, 'from alyeska.compose import Task, DAG\n'), ((1306, 1334), 'alyeska.compose.Task', 'Task', (['"""A.py"""'], {'env': '"""test-env"""'}), "('A.py', env='test-env')\n", (1310, 1334), False, 'from alyeska.compose import Task, DAG\n'), ((1336, 1364), 'alyeska.compose.Task', 'Task', (['"""B.py"""'], {'env': '"""test-env"""'}), "('B.py', env='test-env')\n", (1340, 1364), False, 'from alyeska.compose import Task, DAG\n'), ((1687, 1711), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1700, 1711), False, 'import pytest\n'), ((1721, 1755), 'alyeska.compose.DAG.validate_dependency', 'DAG.validate_dependency', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1744, 1755), False, 'from alyeska.compose import Task, DAG\n'), ((1766, 1791), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1779, 1791), False, 'import pytest\n'), ((1913, 1955), 'alyeska.compose.DAG.validate_dependency', 'DAG.validate_dependency', (['{Task: {1, 2, 3}}'], {}), '({Task: {1, 2, 3}})\n', (1936, 1955), False, 'from alyeska.compose import Task, DAG\n'), ((4185, 4216), 'pytest.raises', 'pytest.raises', (['CyclicGraphError'], {}), '(CyclicGraphError)\n', (4198, 4216), False, 'import pytest\n'), ((4704, 4735), 'pytest.raises', 'pytest.raises', (['CyclicGraphError'], {}), '(CyclicGraphError)\n', (4717, 4735), False, 'import pytest\n'), ((6060, 6091), 'pytest.raises', 'pytest.raises', (['CyclicGraphError'], {}), '(CyclicGraphError)\n', (6073, 6091), False, 'import pytest\n'), ((6209, 6240), 'pytest.raises', 'pytest.raises', (['CyclicGraphError'], {}), '(CyclicGraphError)\n', (6222, 6240), False, 'import pytest\n'), ((6250, 6278), 'alyeska.compose.DAG.from_yaml', 'DAG.from_yaml', (['COMPOSE_CYCLE'], {}), '(COMPOSE_CYCLE)\n', (6263, 6278), False, 'from alyeska.compose import Task, DAG\n'), ((1825, 1866), 'collections.defaultdict', 'defaultdict', (['set', '{make_tea: [drink_tea]}'], {}), '(set, {make_tea: [drink_tea]})\n', (1836, 1866), False, 'from collections import defaultdict\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# basic import
import os
import os.path as op
import sys
import time
sys.path.insert(0, op.join(op.dirname(__file__),'..','..'))
# python libs
import numpy as np
import xarray as xr
# custom libs
from teslakit.project_site import PathControl
from teslakit.extremes import FitGEV_KMA_Frechet
# --------------------------------------
# Test data storage
pc = PathControl()
p_tests = pc.p_test_data
p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet')
# input
p_npz = op.join(p_test, 'swell_1_Hs.npz')
# --------------------------------------
# Load data
npzf = np.load(p_npz)
bmus = npzf['arr_0']
n_clusters = npzf['arr_1']
var_wvs = npzf['arr_2']
print(bmus)
print(n_clusters)
print(var_wvs)
print()
# TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92
gp_pars = FitGEV_KMA_Frechet(
bmus, n_clusters, var_wvs)
print(gp_pars)
|
[
"numpy.load",
"os.path.dirname",
"teslakit.extremes.FitGEV_KMA_Frechet",
"teslakit.project_site.PathControl",
"os.path.join"
] |
[((408, 421), 'teslakit.project_site.PathControl', 'PathControl', ([], {}), '()\n', (419, 421), False, 'from teslakit.project_site import PathControl\n'), ((456, 515), 'os.path.join', 'op.join', (['p_tests', '"""ClimateEmulator"""', '"""gev_fit_kma_fretchet"""'], {}), "(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet')\n", (463, 515), True, 'import os.path as op\n'), ((533, 566), 'os.path.join', 'op.join', (['p_test', '"""swell_1_Hs.npz"""'], {}), "(p_test, 'swell_1_Hs.npz')\n", (540, 566), True, 'import os.path as op\n'), ((629, 643), 'numpy.load', 'np.load', (['p_npz'], {}), '(p_npz)\n', (636, 643), True, 'import numpy as np\n'), ((842, 887), 'teslakit.extremes.FitGEV_KMA_Frechet', 'FitGEV_KMA_Frechet', (['bmus', 'n_clusters', 'var_wvs'], {}), '(bmus, n_clusters, var_wvs)\n', (860, 887), False, 'from teslakit.extremes import FitGEV_KMA_Frechet\n'), ((143, 163), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (153, 163), True, 'import os.path as op\n')]
|
'''
Authentication urls for ToDos Users
Author: <NAME>
'''
from django.conf.urls import url
from . import views
# Authentiction urls
urlpatterns = [
url(r'^login/', views._login),
url(r'^signup/', views._register),
url(r'^change_password/', views._changePassword),
url(r'^logout/', views._logout),
url(r'^upload/', views._upload),
url(r'^profile/', views._profile),
# url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\w]+)', views._activate),
# url(r'^resend_activation_email/(?P<id>[0-9]+)', views.resend_activation_email),
]
|
[
"django.conf.urls.url"
] |
[((163, 191), 'django.conf.urls.url', 'url', (['"""^login/"""', 'views._login'], {}), "('^login/', views._login)\n", (166, 191), False, 'from django.conf.urls import url\n'), ((198, 230), 'django.conf.urls.url', 'url', (['"""^signup/"""', 'views._register'], {}), "('^signup/', views._register)\n", (201, 230), False, 'from django.conf.urls import url\n'), ((237, 284), 'django.conf.urls.url', 'url', (['"""^change_password/"""', 'views._changePassword'], {}), "('^change_password/', views._changePassword)\n", (240, 284), False, 'from django.conf.urls import url\n'), ((291, 321), 'django.conf.urls.url', 'url', (['"""^logout/"""', 'views._logout'], {}), "('^logout/', views._logout)\n", (294, 321), False, 'from django.conf.urls import url\n'), ((328, 358), 'django.conf.urls.url', 'url', (['"""^upload/"""', 'views._upload'], {}), "('^upload/', views._upload)\n", (331, 358), False, 'from django.conf.urls import url\n'), ((365, 397), 'django.conf.urls.url', 'url', (['"""^profile/"""', 'views._profile'], {}), "('^profile/', views._profile)\n", (368, 397), False, 'from django.conf.urls import url\n')]
|
from typing import List
from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher
class AutoFetcher:
"""A factory function which returns the correct data fetcher for the given `tasks`.
A `Fetcher` is returned which requests all the data relevant to `tasks` in a single call to its
`fetch` method. This ensures the API endpoints are only queried once, rather than for every
task individually.
NOTE: It is assumed that a `benchmark.json` file already exists, with at least the gene IDs
present. This file can be created by running the `get_protein_ids.py` file in `scripts`.
"""
def __new__(cls, tasks: List[str]) -> Fetcher:
fetcher = Fetcher()
uniprot_fetcher = UniprotFetcher()
for task in tasks:
if task.startswith("sequence"):
uniprot_fetcher.register(SequenceFetcher)
if task.startswith("subcellular_localization"):
uniprot_fetcher.register(LocalizationFetcher)
fetcher.register(uniprot_fetcher)
return fetcher
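# Hedged usage sketch (not part of the original module): the task names are
# placeholders, and only the `fetch` method promised by the docstring is assumed.
if __name__ == "__main__":
    fetcher = AutoFetcher(["sequence.example_task", "subcellular_localization.example_task"])
    data = fetcher.fetch()  # a single round-trip to UniProt covers both registered fetchers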
|
[
"geneeval.fetcher.fetchers.UniprotFetcher",
"geneeval.fetcher.fetchers.Fetcher"
] |
[((723, 732), 'geneeval.fetcher.fetchers.Fetcher', 'Fetcher', ([], {}), '()\n', (730, 732), False, 'from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher\n'), ((760, 776), 'geneeval.fetcher.fetchers.UniprotFetcher', 'UniprotFetcher', ([], {}), '()\n', (774, 776), False, 'from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher\n')]
|
import os, json
from typing import Dict, Iterable
from azure.cosmos import (CosmosClient,
PartitionKey,
ContainerProxy,
DatabaseProxy)
SETTINGS = dict(
HOST = os.getenv('COSMOSDB_HOST'),
MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'),
DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID')
)
class DataBaseClient():
def __init__(self, container_id, partition_key) -> None:
super().__init__()
self.container_id = container_id
self.partition_key = partition_key
def get_cosmosdb_client(self) -> CosmosClient:
client = CosmosClient(
endpoint_url=SETTINGS['HOST'],
auth={'masterKey': SETTINGS['MASTER_KEY']}
)
return client
def get_cosmosdb_database(self) -> DatabaseProxy:
client = self.get_cosmosdb_client()
database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID'])
return database
def get_cosmosdb_container(self) -> ContainerProxy:
database = self.get_cosmosdb_database()
container = database.create_container_if_not_exists(
id=self.container_id,
partition_key=PartitionKey(path=self.partition_key)
)
return container
def create_item_cosmosdb(self, item: Dict) -> Dict:
container = self.get_cosmosdb_container()
item = container.create_item(item)
return item
def upsert_item_cosmosdb(self, item: Dict) -> Dict:
container = self.get_cosmosdb_container()
item = container.upsert_item(item)
return item
def delete_item_cosmosdb(self, item: Dict) -> Dict:
container = self.get_cosmosdb_container()
item = container.delete_item(item)
return item
def get_item_cosmosdb(self, item: Dict) -> Dict:
container = self.get_cosmosdb_container()
item = container.read_item(item)
return item
def query_items_cosmosdb(self, query: str) -> Iterable:
container = self.get_cosmosdb_container()
items = container.query_items(query, enable_cross_partition_query=True)
return items
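# Illustrative usage sketch only: the container name, partition key and document
# fields are invented, and the COSMOSDB_* environment variables must already be set.
if __name__ == "__main__":
    client = DataBaseClient(container_id="users", partition_key="/userId")
    client.upsert_item_cosmosdb({"id": "1", "userId": "alice", "name": "Alice"})
    for item in client.query_items_cosmosdb("SELECT * FROM c WHERE c.userId = 'alice'"):
        print(item)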
|
[
"azure.cosmos.CosmosClient",
"azure.cosmos.PartitionKey",
"os.getenv"
] |
[((242, 268), 'os.getenv', 'os.getenv', (['"""COSMOSDB_HOST"""'], {}), "('COSMOSDB_HOST')\n", (251, 268), False, 'import os, json\n'), ((287, 319), 'os.getenv', 'os.getenv', (['"""COSMOSDB_MASTER_KEY"""'], {}), "('COSMOSDB_MASTER_KEY')\n", (296, 319), False, 'import os, json\n'), ((339, 372), 'os.getenv', 'os.getenv', (['"""COSMOSDB_DATABASE_ID"""'], {}), "('COSMOSDB_DATABASE_ID')\n", (348, 372), False, 'import os, json\n'), ((663, 755), 'azure.cosmos.CosmosClient', 'CosmosClient', ([], {'endpoint_url': "SETTINGS['HOST']", 'auth': "{'masterKey': SETTINGS['MASTER_KEY']}"}), "(endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS[\n 'MASTER_KEY']})\n", (675, 755), False, 'from azure.cosmos import CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy\n'), ((1245, 1282), 'azure.cosmos.PartitionKey', 'PartitionKey', ([], {'path': 'self.partition_key'}), '(path=self.partition_key)\n', (1257, 1282), False, 'from azure.cosmos import CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy\n')]
|
import os
import sys
import cv2
import time
import caffe
import numpy as np
import config
sys.path.append('../')
from fast_mtcnn import fast_mtcnn
from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72
from baidu import call_baidu_api
def create_net(model_dir, iter_num):
model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num)
proto_path = 'landmark.prototxt'
return caffe.Net(proto_path, model_path, caffe.TEST)
if __name__ == '__main__':
iter_num = int(sys.argv[1])
img_path = sys.argv[2]
model_dir = config.MODEL_DIR
if len(sys.argv) > 3:
model_dir = sys.argv[3]
img = cv2.imread(img_path)
net = create_net(model_dir, iter_num)
mtcnn = fast_mtcnn()
boxes = mtcnn(img_path)
for box in boxes:
if not is_valid_facebox(box):
continue
exp_box = expand_mtcnn_box(img, box)
cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]]
baidu_result = call_baidu_api(cropped, '')
baidu_lm = extract_baidu_lm72(baidu_result[0][-1])
for x, y in baidu_lm:
x = int(x + exp_box[0])
y = int(y + exp_box[1])
cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1)
h, w, _ = cropped.shape
cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE))
cropped = np.swapaxes(cropped, 0, 2)
cropped = (cropped - 127.5) / 127.5
net.blobs['data'].data[0] = cropped
out = net.forward()
landmark = out['Dense2'][0]
for pt in landmark.reshape((config.LANDMARK_SIZE, 2)):
x, y = pt
x = x * w + exp_box[0]
y = y * h + exp_box[1]
cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1)
time.sleep(0.5)
cv2.imwrite('result.jpg', img)
|
[
"sys.path.append",
"gen_landmark.extract_baidu_lm72",
"fast_mtcnn.fast_mtcnn",
"baidu.call_baidu_api",
"gen_landmark.is_valid_facebox",
"cv2.imwrite",
"gen_landmark.expand_mtcnn_box",
"time.sleep",
"cv2.imread",
"numpy.swapaxes",
"caffe.Net",
"os.path.join",
"cv2.resize"
] |
[((90, 112), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (105, 112), False, 'import sys\n'), ((315, 380), 'os.path.join', 'os.path.join', (['model_dir', "('landmark_iter_%d.caffemodel' % iter_num)"], {}), "(model_dir, 'landmark_iter_%d.caffemodel' % iter_num)\n", (327, 380), False, 'import os\n'), ((429, 474), 'caffe.Net', 'caffe.Net', (['proto_path', 'model_path', 'caffe.TEST'], {}), '(proto_path, model_path, caffe.TEST)\n', (438, 474), False, 'import caffe\n'), ((664, 684), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (674, 684), False, 'import cv2\n'), ((740, 752), 'fast_mtcnn.fast_mtcnn', 'fast_mtcnn', ([], {}), '()\n', (750, 752), False, 'from fast_mtcnn import fast_mtcnn\n'), ((1807, 1837), 'cv2.imwrite', 'cv2.imwrite', (['"""result.jpg"""', 'img'], {}), "('result.jpg', img)\n", (1818, 1837), False, 'import cv2\n'), ((880, 906), 'gen_landmark.expand_mtcnn_box', 'expand_mtcnn_box', (['img', 'box'], {}), '(img, box)\n', (896, 906), False, 'from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72\n'), ((998, 1025), 'baidu.call_baidu_api', 'call_baidu_api', (['cropped', '""""""'], {}), "(cropped, '')\n", (1012, 1025), False, 'from baidu import call_baidu_api\n'), ((1045, 1084), 'gen_landmark.extract_baidu_lm72', 'extract_baidu_lm72', (['baidu_result[0][-1]'], {}), '(baidu_result[0][-1])\n', (1063, 1084), False, 'from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72\n'), ((1303, 1358), 'cv2.resize', 'cv2.resize', (['cropped', '(config.IMG_SIZE, config.IMG_SIZE)'], {}), '(cropped, (config.IMG_SIZE, config.IMG_SIZE))\n', (1313, 1358), False, 'import cv2\n'), ((1377, 1403), 'numpy.swapaxes', 'np.swapaxes', (['cropped', '(0)', '(2)'], {}), '(cropped, 0, 2)\n', (1388, 1403), True, 'import numpy as np\n'), ((1786, 1801), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1796, 1801), False, 'import time\n'), ((818, 839), 'gen_landmark.is_valid_facebox', 'is_valid_facebox', (['box'], {}), '(box)\n', (834, 839), False, 'from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72\n')]
|
import uuid
import mongoengine as db
from flask import current_app as app
from flask_mongoengine import MongoEngine
class Properties:
name = "example"
nic = "example-nic"
disk = "example-disk"
vmId = str(uuid.uuid4())
subId = str(uuid.uuid4())
rgroup = "example-resource-group"
availabilitySet = "example-availability-set"
ppg = "example-proximity-placement-group"
props = Properties()
store = MongoEngine(app._get_current_object())
class VirtualMachine(db.Document):
tags = db.DictField()
name = db.StringField(required=True)
location = db.StringField(required=True)
subscription = db.StringField(required=True)
resourceGroup = db.StringField(required=True)
rid = db.StringField(required=True, unique=True)
provisioningState = db.StringField(default='Succeeded')
type = db.StringField(default='Microsoft.Compute/virtualMachines')
properties = db.DictField(default={
"vmId": props.vmId,
"availabilitySet": {
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet"
},
"proximityPlacementGroup": {
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}"
},
"hardwareProfile": {
"vmSize": "Standard_DS3_v2"
},
"storageProfile": {
"imageReference": {
"publisher": "MicrosoftWindowsServer",
"offer": "WindowsServer",
"sku": "2016-Datacenter",
"version": "latest"
},
"osDisk": {
"osType": "Windows",
"name": "myOsDisk",
"createOption": "FromImage",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Premium_LRS",
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}"
},
"diskSizeGB": 30
},
"dataDisks": [
{
"lun": 0,
"name": "myDataDisk0",
"createOption": "Empty",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Premium_LRS",
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}"
},
"diskSizeGB": 30
},
{
"lun": 1,
"name": "myDataDisk1",
"createOption": "Attach",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Premium_LRS",
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}"
},
"diskSizeGB": 100
}
]
},
"userData": "RXhhbXBsZSBVc2VyRGF0YQ==",
"osProfile": {
"computerName": "myVM",
"adminUsername": "admin",
"windowsConfiguration": {
"provisionVMAgent": True,
"enableAutomaticUpdates": False
},
"secrets": []
},
"networkProfile": {
"networkInterfaces": [
{
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": True,
"storageUri": f"http://{props.name}.blob.core.windows.net"
}
},
"extensionsTimeBudget": "PT50M",
"provisioningState": "Succeeded"
})
meta = {'collection': 'virtualmachines'}
def __repr__(self):
return "VirtualMachine(%s)" % self.rid
def save(self, *args, **kwargs):
self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % (
self.subscription, self.resourceGroup, self.type, self.name)
        super().save(*args, **kwargs)
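# Minimal, hypothetical example of using this model; the field values are placeholders
# and a MongoDB connection must already have been configured by the Flask app.
def _example_create_vm():
    vm = VirtualMachine(
        name="example-vm",
        location="westus",
        subscription=props.subId,
        resourceGroup=props.rgroup,
        rid="placeholder",  # recomputed inside save()
    )
    vm.save()
    return vm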
|
[
"flask.current_app._get_current_object",
"mongoengine.StringField",
"uuid.uuid4",
"mongoengine.DictField"
] |
[((444, 469), 'flask.current_app._get_current_object', 'app._get_current_object', ([], {}), '()\n', (467, 469), True, 'from flask import current_app as app\n'), ((519, 533), 'mongoengine.DictField', 'db.DictField', ([], {}), '()\n', (531, 533), True, 'import mongoengine as db\n'), ((545, 574), 'mongoengine.StringField', 'db.StringField', ([], {'required': '(True)'}), '(required=True)\n', (559, 574), True, 'import mongoengine as db\n'), ((590, 619), 'mongoengine.StringField', 'db.StringField', ([], {'required': '(True)'}), '(required=True)\n', (604, 619), True, 'import mongoengine as db\n'), ((639, 668), 'mongoengine.StringField', 'db.StringField', ([], {'required': '(True)'}), '(required=True)\n', (653, 668), True, 'import mongoengine as db\n'), ((689, 718), 'mongoengine.StringField', 'db.StringField', ([], {'required': '(True)'}), '(required=True)\n', (703, 718), True, 'import mongoengine as db\n'), ((729, 771), 'mongoengine.StringField', 'db.StringField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (743, 771), True, 'import mongoengine as db\n'), ((796, 831), 'mongoengine.StringField', 'db.StringField', ([], {'default': '"""Succeeded"""'}), "(default='Succeeded')\n", (810, 831), True, 'import mongoengine as db\n'), ((843, 902), 'mongoengine.StringField', 'db.StringField', ([], {'default': '"""Microsoft.Compute/virtualMachines"""'}), "(default='Microsoft.Compute/virtualMachines')\n", (857, 902), True, 'import mongoengine as db\n'), ((920, 3019), 'mongoengine.DictField', 'db.DictField', ([], {'default': "{'vmId': props.vmId, 'availabilitySet': {'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet'\n }, 'proximityPlacementGroup': {'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}'\n }, 'hardwareProfile': {'vmSize': 'Standard_DS3_v2'}, 'storageProfile':\n {'imageReference': {'publisher': 'MicrosoftWindowsServer', 'offer':\n 'WindowsServer', 'sku': '2016-Datacenter', 'version': 'latest'},\n 'osDisk': {'osType': 'Windows', 'name': 'myOsDisk', 'createOption':\n 'FromImage', 'caching': 'ReadWrite', 'managedDisk': {\n 'storageAccountType': 'Premium_LRS', 'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}'\n }, 'diskSizeGB': 30}, 'dataDisks': [{'lun': 0, 'name': 'myDataDisk0',\n 'createOption': 'Empty', 'caching': 'ReadWrite', 'managedDisk': {\n 'storageAccountType': 'Premium_LRS', 'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}'\n }, 'diskSizeGB': 30}, {'lun': 1, 'name': 'myDataDisk1', 'createOption':\n 'Attach', 'caching': 'ReadWrite', 'managedDisk': {'storageAccountType':\n 'Premium_LRS', 'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}'\n }, 'diskSizeGB': 100}]}, 'userData': 'RXhhbXBsZSBVc2VyRGF0YQ==',\n 'osProfile': {'computerName': 'myVM', 'adminUsername': 'admin',\n 'windowsConfiguration': {'provisionVMAgent': True,\n 'enableAutomaticUpdates': False}, 'secrets': []}, 'networkProfile': {\n 'networkInterfaces': [{'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}'\n }]}, 'diagnosticsProfile': {'bootDiagnostics': {'enabled': True,\n 'storageUri': f'http://{props.name}.blob.core.windows.net'}},\n 'extensionsTimeBudget': 
'PT50M', 'provisioningState': 'Succeeded'}"}), "(default={'vmId': props.vmId, 'availabilitySet': {'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet'\n }, 'proximityPlacementGroup': {'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}'\n }, 'hardwareProfile': {'vmSize': 'Standard_DS3_v2'}, 'storageProfile':\n {'imageReference': {'publisher': 'MicrosoftWindowsServer', 'offer':\n 'WindowsServer', 'sku': '2016-Datacenter', 'version': 'latest'},\n 'osDisk': {'osType': 'Windows', 'name': 'myOsDisk', 'createOption':\n 'FromImage', 'caching': 'ReadWrite', 'managedDisk': {\n 'storageAccountType': 'Premium_LRS', 'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}'\n }, 'diskSizeGB': 30}, 'dataDisks': [{'lun': 0, 'name': 'myDataDisk0',\n 'createOption': 'Empty', 'caching': 'ReadWrite', 'managedDisk': {\n 'storageAccountType': 'Premium_LRS', 'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}'\n }, 'diskSizeGB': 30}, {'lun': 1, 'name': 'myDataDisk1', 'createOption':\n 'Attach', 'caching': 'ReadWrite', 'managedDisk': {'storageAccountType':\n 'Premium_LRS', 'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}'\n }, 'diskSizeGB': 100}]}, 'userData': 'RXhhbXBsZSBVc2VyRGF0YQ==',\n 'osProfile': {'computerName': 'myVM', 'adminUsername': 'admin',\n 'windowsConfiguration': {'provisionVMAgent': True,\n 'enableAutomaticUpdates': False}, 'secrets': []}, 'networkProfile': {\n 'networkInterfaces': [{'id':\n f'/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}'\n }]}, 'diagnosticsProfile': {'bootDiagnostics': {'enabled': True,\n 'storageUri': f'http://{props.name}.blob.core.windows.net'}},\n 'extensionsTimeBudget': 'PT50M', 'provisioningState': 'Succeeded'})\n", (932, 3019), True, 'import mongoengine as db\n'), ((224, 236), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (234, 236), False, 'import uuid\n'), ((254, 266), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (264, 266), False, 'import uuid\n')]
|
from sendgrid.helpers.mail import Mail
from CommonCode.strings import Strings
class SendGridEmailHelper:
    def builderToMail(self, emailBuilder):
        fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId)
toids = list()
for ids in emailBuilder.toId:
toids.append(Strings.getFormattedEmail(builder=ids))
subject = emailBuilder.subject
content = emailBuilder.content
return Mail(from_email=fromId,
to_emails=toids,
subject=subject,
html_content=content)
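# Sketch of how builderToMail might be used; the builder below is a stand-in exposing
# the attributes this helper reads (fromId, toId, subject, content). What
# Strings.getFormattedEmail actually expects from those values is an assumption here.
class _ExampleBuilder:
    fromId = "sender@example.com"
    toId = ["first@example.com", "second@example.com"]
    subject = "Hello"
    content = "<p>Hi there</p>"
def _example_build_mail():
    # Returns a sendgrid Mail object that can then be sent with SendGridAPIClient.send().
    return SendGridEmailHelper().builderToMail(_ExampleBuilder())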
|
[
"sendgrid.helpers.mail.Mail",
"CommonCode.strings.Strings.getFormattedEmail"
] |
[((167, 221), 'CommonCode.strings.Strings.getFormattedEmail', 'Strings.getFormattedEmail', ([], {'builder': 'emailBuilder.fromId'}), '(builder=emailBuilder.fromId)\n', (192, 221), False, 'from CommonCode.strings import Strings\n'), ((442, 521), 'sendgrid.helpers.mail.Mail', 'Mail', ([], {'from_email': 'fromId', 'to_emails': 'toids', 'subject': 'subject', 'html_content': 'content'}), '(from_email=fromId, to_emails=toids, subject=subject, html_content=content)\n', (446, 521), False, 'from sendgrid.helpers.mail import Mail\n'), ((309, 347), 'CommonCode.strings.Strings.getFormattedEmail', 'Strings.getFormattedEmail', ([], {'builder': 'ids'}), '(builder=ids)\n', (334, 347), False, 'from CommonCode.strings import Strings\n')]
|
import json
import os
from typing import TextIO, Hashable, Iterator
from dateutil.parser import isoparse
from utils.sorts import Sort, Group
def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort:
"""
Extracts the information from a trello board json file.
A card_mapping maps the card prompts to an ID which is usually more
useful for analysis. Card prompts will be mapped to the given ID when
parsing and used in place of the card prompt.
:param f: a TextIO Stream of the trello board json file
:param card_mapping: a mapping of card names to card ids
:return: a Sort object
"""
data = json.load(f)
trello_lists = data['lists']
trello_lists.sort(key=lambda x: x['pos'])
# Cards are linked to their lists by list ID. So, a temporary mapping
# from list IDs to groups is needed.
groups_by_id = {}
for trello_list in trello_lists:
group_name = trello_list['name']
list_id = trello_list['id']
group = Group(group_name)
groups_by_id[list_id] = group
cards = data['cards']
# Participants may accidentally add cards which are then deleted, "closed".
cards = [card for card in cards if not card['closed']]
for card in cards:
group_id = card['idList']
group = groups_by_id[group_id]
# It may be more useful to map card prompts to an ID for analysis
if card_mapping is not None:
card_data = card_mapping[card['name']]
else:
card_data = card['name']
group.cards.add(card_data)
actions = data['actions']
actions.sort(key=lambda x: isoparse(x['date']))
# Only card moves, list creation, and list renaming are considered.
valid_actions = []
for action in actions:
action_data = action['data']
action_type = action['type']
# Card is moved
if action_type == 'updateCard' and 'listBefore' in action_data:
valid_actions.append(action)
# List is created
elif action_type == 'createList':
valid_actions.append(action)
# List is renamed
elif action_type == 'updateList' and 'name' in action_data['old']:
valid_actions.append(action)
# For the purposes of this study, sorts were considered to start when the
# first trello list was created. Sorts were considered to end when the
# last card move or list rename action was performed.
first_list = next(action for action in valid_actions
if action['type'] == 'createList')
start_time = isoparse(first_list['date'])
end_time = isoparse(actions[-1]['date'])
total_sort_time = end_time - start_time
# Empty groups are discarded.
groups = [group for group in groups_by_id.values() if group.cards]
sort_name = data['name']
cards = set(card_mapping.values())
sort = Sort(sort_name, groups, cards, total_sort_time)
return sort
def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]:
"""
Returns a list of paths to json files in the given directory. Nested
directories are not traversed.
:param path: a path to a directory
:return: the list of paths to json files in the given directory
"""
files = os.listdir(path)
for file in files:
file_path = os.path.join(path, file)
if os.path.isfile(file_path) and file.endswith('.json'):
yield file_path
def parse_sorts_in_dir(path: str,
card_mapping: dict[str, Hashable] = None) -> list[Sort]:
"""
Parses all sorts in the given directory. Nested directories are not
traversed. This is equivalent to calling parse_sort on each json file in
the given directory.
:param path: a path to a directory
:param card_mapping: an optional mapping of card names to card ids
:return: a list of Sort objects
"""
sorts = []
trello_json_paths = get_paths_to_jsons_in_dir(path)
for path in trello_json_paths:
with open(path, 'r') as f:
sort = parse_board(f, card_mapping)
sorts.append(sort)
return sorts
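# Example invocation (directory path and card prompts are made up): parse every
# exported board in a folder, mapping each card prompt to a short id for analysis.
if __name__ == "__main__":
    mapping = {"Card prompt A": 1, "Card prompt B": 2}
    parsed = parse_sorts_in_dir("exports/trello_boards", card_mapping=mapping)
    print(f"parsed {len(parsed)} sorts")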
|
[
"utils.sorts.Group",
"json.load",
"utils.sorts.Sort",
"dateutil.parser.isoparse",
"os.path.isfile",
"os.path.join",
"os.listdir"
] |
[((649, 661), 'json.load', 'json.load', (['f'], {}), '(f)\n', (658, 661), False, 'import json\n'), ((2588, 2616), 'dateutil.parser.isoparse', 'isoparse', (["first_list['date']"], {}), "(first_list['date'])\n", (2596, 2616), False, 'from dateutil.parser import isoparse\n'), ((2632, 2661), 'dateutil.parser.isoparse', 'isoparse', (["actions[-1]['date']"], {}), "(actions[-1]['date'])\n", (2640, 2661), False, 'from dateutil.parser import isoparse\n'), ((2892, 2939), 'utils.sorts.Sort', 'Sort', (['sort_name', 'groups', 'cards', 'total_sort_time'], {}), '(sort_name, groups, cards, total_sort_time)\n', (2896, 2939), False, 'from utils.sorts import Sort, Group\n'), ((3261, 3277), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3271, 3277), False, 'import os\n'), ((1010, 1027), 'utils.sorts.Group', 'Group', (['group_name'], {}), '(group_name)\n', (1015, 1027), False, 'from utils.sorts import Sort, Group\n'), ((3321, 3345), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (3333, 3345), False, 'import os\n'), ((3357, 3382), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (3371, 3382), False, 'import os\n'), ((1639, 1658), 'dateutil.parser.isoparse', 'isoparse', (["x['date']"], {}), "(x['date'])\n", (1647, 1658), False, 'from dateutil.parser import isoparse\n')]
|
import abc
import logging
import math
import random
from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent
from orca.ports import InputPort, OutputPort
logger = logging.getLogger(__name__)
OUTPUT_PORT_NAME = "output"
class IOperator(abc.ABC):
def __init__(
self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False
):
self.x = x
self.y = y
self.name = name
self.description = description
self.ports = {}
self._grid = grid
self.is_passive = is_passive
self.do_draw = is_passive
self.glyph = glyph.upper() if is_passive else glyph
@abc.abstractmethod
def operation(self, frame, force=False):
"""Run the operator for the given frame and return the payload.
This may modify the grid.
Note: the frame is assumed to match the state of the grid given at
construction time."""
def __str__(self):
return self.name
def run(self, frame, force=False):
payload = self.operation(frame, force)
for port in self.ports.values():
if isinstance(port, OutputPort) and port.is_bang:
continue
logger.debug(
"Ops %s (%d, %d): locking port @ %d, %d",
self.name,
self.x,
self.y,
port.x,
port.y,
)
self._grid.lock(port.x, port.y)
output_port = self._output_port
if output_port:
if output_port.is_bang:
self._bang(payload)
else:
self._output(payload)
def erase(self):
self._grid.poke(self.x, self.y, DOT_GLYPH)
def explode(self):
self._grid.poke(self.x, self.y, BANG_GLYPH)
def has_neighbor(self, glyph):
for x, y in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if self._grid.peek(self.x + x, self.y + y) == glyph:
return True
return False
def move(self, offset_x, offset_y):
new_x = self.x + offset_x
new_y = self.y + offset_y
if not self._grid.is_inside(new_x, new_y):
self.explode()
return
collider = self._grid.peek(new_x, new_y)
if collider not in (BANG_GLYPH, DOT_GLYPH):
self.explode()
return
self.erase()
self.x += offset_x
self.y += offset_y
self._grid.poke(self.x, self.y, self.glyph)
if self._grid.is_inside(self.x, self.y):
self._grid.lock(self.x, self.y)
@property
def _output_port(self):
return self.ports.get(OUTPUT_PORT_NAME)
def _has_output_port(self):
return OUTPUT_PORT_NAME in self.ports
def _should_upper_case(self):
output_port = self._output_port
if output_port is None or not output_port.is_sensitive:
return False
else:
right_port = InputPort(self.x + 1, self.y)
value = self._grid.listen(right_port)
if value.lower() == value.upper() or value.upper() != value:
return False
else:
return True
def _bang(self, payload):
output_port = self._output_port
if output_port is None:
logger.warn("Trying to bang, but no output port.")
return
else:
glyph = BANG_GLYPH if payload else DOT_GLYPH
self._grid.poke(output_port.x, output_port.y, glyph)
def _output(self, glyph, port=None):
if port is None:
output_port = self._output_port
else:
output_port = port
if output_port is None:
logging.warn(
"No output port for operator %s @ (%d, %d)", self.name, self.x, self.y
)
elif glyph is None:
return
else:
if self._should_upper_case():
value = glyph.upper()
else:
value = glyph
self._grid.poke(output_port.x, output_port.y, value)
class Add(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid, x, y, "add", "Output sum of inputs", glyph="a", is_passive=is_passive
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
index = self._grid.listen_as_value(
self.ports["a"]
) + self._grid.listen_as_value(self.ports["b"])
return self._grid.key_of(index)
class Substract(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"substract",
"Output difference of inputs",
glyph="b",
is_passive=is_passive,
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
a = self._grid.listen_as_value(self.ports["a"])
b = self._grid.listen_as_value(self.ports["b"])
return self._grid.key_of(abs(b - a))
class Clock(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"clock",
"Outputs modulo of frame",
glyph="c",
is_passive=is_passive,
)
self.ports.update(
{
"rate": InputPort(x - 1, y, clamp=lambda x: max(1, x)),
"mod": InputPort(x + 1, y, default="8"),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
rate = self._grid.listen_as_value(self.ports["rate"])
mod = self._grid.listen_as_value(self.ports["mod"])
value = math.floor(frame / rate) % mod
return self._grid.key_of(value)
class Delay(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"delay",
"Bangs on module of frame",
glyph="d",
is_passive=is_passive,
)
self.ports.update(
{
"rate": InputPort(x - 1, y, clamp=lambda x: max(1, x)),
"mod": InputPort(x + 1, y, default="8"),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True),
}
)
def operation(self, frame, force=False):
rate = self._grid.listen_as_value(self.ports["rate"])
mod = self._grid.listen_as_value(self.ports["mod"])
value = frame % (mod * rate)
return value == 0 or mod == 1
class East(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"east",
"Move eastwards or bang",
glyph="e",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(1, 0)
self.is_passive = False
class Generator(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"generator",
"Write operands with offset",
glyph="g",
is_passive=is_passive,
)
self.ports.update(
{
"x": InputPort(x - 3, y),
"y": InputPort(x - 2, y),
"len": InputPort(x - 1, y, clamp=lambda x: max(x, 1)),
}
)
def operation(self, frame, force=False):
length = self._grid.listen_as_value(self.ports["len"])
x = self._grid.listen_as_value(self.ports["x"])
y = self._grid.listen_as_value(self.ports["y"]) + 1
for offset in range(length):
input_port = InputPort(self.x + offset + 1, self.y)
output_port = OutputPort(self.x + x + offset, self.y + y)
self.ports.update(
{
f"input{offset}": input_port,
f"output{offset}": output_port,
}
)
res = self._grid.listen(input_port)
self._output(res, output_port)
class Halt(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"half",
"Halts southward operator",
glyph="h",
is_passive=is_passive,
)
def operation(self, frame, force=False):
self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y)
class If(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"if",
"Bang if inputs are equal",
glyph="f",
is_passive=is_passive,
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True),
}
)
def operation(self, frame, force=False):
a = self._grid.listen(self.ports["a"])
b = self._grid.listen(self.ports["b"])
return a == b
class Increment(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"increment",
"Increment operator southward",
glyph="i",
is_passive=is_passive,
)
self.ports.update(
{
"step": InputPort(x - 1, y, default="1"),
"mod": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
step = self._grid.listen_as_value(self.ports["step"])
mod = self._grid.listen_as_value(self.ports["mod"])
out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME])
return self._grid.key_of((out + step) % (mod if mod > 0 else 36))
class Jumper(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"j",
"Outputs northward operator",
glyph="f",
is_passive=is_passive,
)
self.ports.update(
{
"val": InputPort(x, y - 1),
OUTPUT_PORT_NAME: OutputPort(x, y + 1),
}
)
def operation(self, frame, force=False):
self._grid.lock(self._output_port.x, self._output_port.y)
return self._grid.listen(self.ports["val"])
class Multiply(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"multiply",
"Output multiplication of inputs",
glyph="m",
is_passive=is_passive,
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
a = self._grid.listen_as_value(self.ports["a"])
b = self._grid.listen_as_value(self.ports["b"])
return self._grid.key_of(a * b)
class North(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"north",
"Move northward or bang",
glyph="n",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(0, -1)
self.is_passive = False
class Random(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"random",
"Outputs random value",
glyph="r",
is_passive=is_passive,
)
self.ports.update(
{
"min": InputPort(x - 1, y),
"max": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
low = self._grid.listen_as_value(self.ports["min"])
high = self._grid.listen_as_value(self.ports["max"])
value = random.randint(low, high)
return self._grid.key_of(value)
class South(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"south",
"Move southward or bang",
glyph="s",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(0, 1)
self.is_passive = False
class Track(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"track",
"Reads eastward operand",
glyph="t",
is_passive=is_passive,
)
self.ports.update(
{
"key": InputPort(x - 2, y),
"len": InputPort(x - 1, y, clamp=lambda x: max(1, x)),
OUTPUT_PORT_NAME: OutputPort(x, y + 1),
}
)
def operation(self, frame, force=False):
key = self._grid.listen_as_value(self.ports["key"])
length = self._grid.listen_as_value(self.ports["len"])
for offset in range(length):
self._grid.lock(self.x + offset + 1, self.y)
port = InputPort(self.x + 1 + key % length, self.y)
return self._grid.listen(port)
class West(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"west",
"Move westward or bang",
glyph="w",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(-1, 0)
self.is_passive = False
class Jymper(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"y",
"Outputs westward operator",
glyph="y",
is_passive=is_passive,
)
self.ports.update(
{
"val": InputPort(x - 1, y),
OUTPUT_PORT_NAME: OutputPort(x + 1, y),
}
)
def operation(self, frame, force=False):
self._grid.lock(self._output_port.x, self._output_port.y)
return self._grid.listen(self.ports["val"])
class Bang(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"bang",
"Bangs neighboring operands",
glyph=BANG_GLYPH,
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.do_draw = False
self.erase()
class Comment(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"comment",
"Halts line",
glyph=COMMENT_GLYPH,
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self._grid.lock(self.x, self.y)
for x in range(self.x + 1, self._grid.cols):
self._grid.lock(x, self.y)
if self._grid.peek(x, self.y) == self.glyph:
break
_NOTES_VALUES = ("C", "c", "D", "d", "E", "F", "f", "G", "g", "A", "a", "B")
NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)}
class Midi(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"midi",
"Send MIDI note",
glyph=":",
is_passive=True,
)
self.ports.update(
{
"channel": InputPort(self.x + 1, self.y),
"octave": InputPort(
self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8)
),
"note": InputPort(self.x + 3, self.y),
"velocity": InputPort(
self.x + 4, self.y, default="f", clamp=lambda x: min(max(0, x), 16)
),
"length": InputPort(
self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32)
),
}
)
def operation(self, frame, force=False):
if not self.has_neighbor(BANG_GLYPH) and not force:
return
for port_name in "channel", "octave", "note":
if self._grid.listen(self.ports[port_name]) == DOT_GLYPH:
return
note = self._grid.listen(self.ports["note"])
        if note not in NOTE_TO_INDEX:
return
channel = self._grid.listen_as_value(self.ports["channel"])
if channel > 15:
return
octave = self._grid.listen_as_value(self.ports["octave"])
velocity = self._grid.listen_as_value(self.ports["velocity"])
length = self._grid.listen_as_value(self.ports["length"])
self._grid.push_midi(MidiNoteOnEvent(channel, octave, note, velocity, length))
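# Sketch of what adding a new operator looks like with the pattern above; this
# "double" operator is not part of the upstream orca operator set and only mirrors
# the structure of Add/Multiply for illustration.
class Double(IOperator):
    def __init__(self, grid, x, y, *, is_passive=False):
        super().__init__(
            grid,
            x,
            y,
            "double",
            "Outputs twice the input",
            glyph="z",
            is_passive=is_passive,
        )
        self.ports.update(
            {
                "a": InputPort(x - 1, y),
                OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
            }
        )
    def operation(self, frame, force=False):
        a = self._grid.listen_as_value(self.ports["a"])
        return self._grid.key_of(a * 2)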
|
[
"orca.grid.MidiNoteOnEvent",
"random.randint",
"logging.warn",
"math.floor",
"orca.ports.InputPort",
"orca.ports.OutputPort",
"logging.getLogger"
] |
[((185, 212), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'import logging\n'), ((13052, 13077), 'random.randint', 'random.randint', (['low', 'high'], {}), '(low, high)\n', (13066, 13077), False, 'import random\n'), ((14333, 14377), 'orca.ports.InputPort', 'InputPort', (['(self.x + 1 + key % length)', 'self.y'], {}), '(self.x + 1 + key % length, self.y)\n', (14342, 14377), False, 'from orca.ports import InputPort, OutputPort\n'), ((2974, 3003), 'orca.ports.InputPort', 'InputPort', (['(self.x + 1)', 'self.y'], {}), '(self.x + 1, self.y)\n', (2983, 3003), False, 'from orca.ports import InputPort, OutputPort\n'), ((3724, 3812), 'logging.warn', 'logging.warn', (['"""No output port for operator %s @ (%d, %d)"""', 'self.name', 'self.x', 'self.y'], {}), "('No output port for operator %s @ (%d, %d)', self.name, self.x,\n self.y)\n", (3736, 3812), False, 'import logging\n'), ((6202, 6226), 'math.floor', 'math.floor', (['(frame / rate)'], {}), '(frame / rate)\n', (6212, 6226), False, 'import math\n'), ((8285, 8323), 'orca.ports.InputPort', 'InputPort', (['(self.x + offset + 1)', 'self.y'], {}), '(self.x + offset + 1, self.y)\n', (8294, 8323), False, 'from orca.ports import InputPort, OutputPort\n'), ((8350, 8393), 'orca.ports.OutputPort', 'OutputPort', (['(self.x + x + offset)', '(self.y + y)'], {}), '(self.x + x + offset, self.y + y)\n', (8360, 8393), False, 'from orca.ports import InputPort, OutputPort\n'), ((18148, 18204), 'orca.grid.MidiNoteOnEvent', 'MidiNoteOnEvent', (['channel', 'octave', 'note', 'velocity', 'length'], {}), '(channel, octave, note, velocity, length)\n', (18163, 18204), False, 'from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent\n'), ((4361, 4380), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (4370, 4380), False, 'from orca.ports import InputPort, OutputPort\n'), ((4403, 4422), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (4412, 4422), False, 'from orca.ports import InputPort, OutputPort\n'), ((4458, 4497), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_sensitive': '(True)'}), '(x, y + 1, is_sensitive=True)\n', (4468, 4497), False, 'from orca.ports import InputPort, OutputPort\n'), ((5097, 5116), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (5106, 5116), False, 'from orca.ports import InputPort, OutputPort\n'), ((5139, 5158), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (5148, 5158), False, 'from orca.ports import InputPort, OutputPort\n'), ((5194, 5233), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_sensitive': '(True)'}), '(x, y + 1, is_sensitive=True)\n', (5204, 5233), False, 'from orca.ports import InputPort, OutputPort\n'), ((5884, 5916), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {'default': '"""8"""'}), "(x + 1, y, default='8')\n", (5893, 5916), False, 'from orca.ports import InputPort, OutputPort\n'), ((5952, 5991), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_sensitive': '(True)'}), '(x, y + 1, is_sensitive=True)\n', (5962, 5991), False, 'from orca.ports import InputPort, OutputPort\n'), ((6696, 6728), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {'default': '"""8"""'}), "(x + 1, y, default='8')\n", (6705, 6728), False, 'from orca.ports import InputPort, OutputPort\n'), ((6764, 6798), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_bang': '(True)'}), '(x, y + 1, is_bang=True)\n', 
(6774, 6798), False, 'from orca.ports import InputPort, OutputPort\n'), ((7839, 7858), 'orca.ports.InputPort', 'InputPort', (['(x - 3)', 'y'], {}), '(x - 3, y)\n', (7848, 7858), False, 'from orca.ports import InputPort, OutputPort\n'), ((7881, 7900), 'orca.ports.InputPort', 'InputPort', (['(x - 2)', 'y'], {}), '(x - 2, y)\n', (7890, 7900), False, 'from orca.ports import InputPort, OutputPort\n'), ((9430, 9449), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (9439, 9449), False, 'from orca.ports import InputPort, OutputPort\n'), ((9472, 9491), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (9481, 9491), False, 'from orca.ports import InputPort, OutputPort\n'), ((9527, 9561), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_bang': '(True)'}), '(x, y + 1, is_bang=True)\n', (9537, 9561), False, 'from orca.ports import InputPort, OutputPort\n'), ((10112, 10144), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {'default': '"""1"""'}), "(x - 1, y, default='1')\n", (10121, 10144), False, 'from orca.ports import InputPort, OutputPort\n'), ((10169, 10188), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (10178, 10188), False, 'from orca.ports import InputPort, OutputPort\n'), ((10224, 10263), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_sensitive': '(True)'}), '(x, y + 1, is_sensitive=True)\n', (10234, 10263), False, 'from orca.ports import InputPort, OutputPort\n'), ((10952, 10971), 'orca.ports.InputPort', 'InputPort', (['x', '(y - 1)'], {}), '(x, y - 1)\n', (10961, 10971), False, 'from orca.ports import InputPort, OutputPort\n'), ((11007, 11027), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {}), '(x, y + 1)\n', (11017, 11027), False, 'from orca.ports import InputPort, OutputPort\n'), ((11579, 11598), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (11588, 11598), False, 'from orca.ports import InputPort, OutputPort\n'), ((11621, 11640), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (11630, 11640), False, 'from orca.ports import InputPort, OutputPort\n'), ((11676, 11715), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_sensitive': '(True)'}), '(x, y + 1, is_sensitive=True)\n', (11686, 11715), False, 'from orca.ports import InputPort, OutputPort\n'), ((12704, 12723), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (12713, 12723), False, 'from orca.ports import InputPort, OutputPort\n'), ((12748, 12767), 'orca.ports.InputPort', 'InputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (12757, 12767), False, 'from orca.ports import InputPort, OutputPort\n'), ((12803, 12842), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {'is_sensitive': '(True)'}), '(x, y + 1, is_sensitive=True)\n', (12813, 12842), False, 'from orca.ports import InputPort, OutputPort\n'), ((13881, 13900), 'orca.ports.InputPort', 'InputPort', (['(x - 2)', 'y'], {}), '(x - 2, y)\n', (13890, 13900), False, 'from orca.ports import InputPort, OutputPort\n'), ((14007, 14027), 'orca.ports.OutputPort', 'OutputPort', (['x', '(y + 1)'], {}), '(x, y + 1)\n', (14017, 14027), False, 'from orca.ports import InputPort, OutputPort\n'), ((15179, 15198), 'orca.ports.InputPort', 'InputPort', (['(x - 1)', 'y'], {}), '(x - 1, y)\n', (15188, 15198), False, 'from orca.ports import InputPort, OutputPort\n'), ((15234, 15254), 'orca.ports.OutputPort', 'OutputPort', (['(x + 1)', 'y'], {}), '(x + 1, y)\n', (15244, 
15254), False, 'from orca.ports import InputPort, OutputPort\n'), ((16910, 16939), 'orca.ports.InputPort', 'InputPort', (['(self.x + 1)', 'self.y'], {}), '(self.x + 1, self.y)\n', (16919, 16939), False, 'from orca.ports import InputPort, OutputPort\n'), ((17095, 17124), 'orca.ports.InputPort', 'InputPort', (['(self.x + 3)', 'self.y'], {}), '(self.x + 3, self.y)\n', (17104, 17124), False, 'from orca.ports import InputPort, OutputPort\n')]
|
""" example plugin to extend a /test route """
from fastapi import APIRouter
router = APIRouter()
@router.get("/test")
async def tester():
""" test route """
return [{"result": "test"}]
|
[
"fastapi.APIRouter"
] |
[((87, 98), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (96, 98), False, 'from fastapi import APIRouter\n')]
|
import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + '/', package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
setup(name='docassemble.ALRecipes',
version='0.1.0',
description=('A docassemble extension.'),
long_description='# docassemble.ALRecipes\r\n\r\n## Content\r\nThis repository includes both short examples you can insert directly into\r\nyour own playground, and longer examples that you can discover from its landing page: Quinten please add the link here.\r\n\r\n - Some Playground examples for the Document Assembly Line project.\r\n - Generic docassemble recipe interviews to address a particular need.\r\n \r\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\r\n\r\n## Add examples to your own playground\r\n\r\nEdit the /config, and add the following: \r\n\r\n```yaml\r\nplayground examples:\r\n - docassemble.ALRecipes:data/questions/examples.yml\r\n - docassemble.base:data/questions/example-list.yml \r\n```\r\n\r\n',
long_description_content_type='text/markdown',
author='AssemblyLine',
author_email='<EMAIL>',
license='The MIT License (MIT)',
url='https://docassemble.org',
packages=find_packages(),
namespace_packages=['docassemble'],
install_requires=['mechanize>=0.4.7'],
zip_safe=False,
package_data=find_package_data(where='docassemble/ALRecipes/', package='docassemble.ALRecipes'),
)
|
[
"os.path.isdir",
"distutils.util.convert_path",
"fnmatch.fnmatchcase",
"os.path.join",
"os.listdir",
"setuptools.find_packages"
] |
[((567, 584), 'os.listdir', 'os.listdir', (['where'], {}), '(where)\n', (577, 584), False, 'import os\n'), ((2940, 2955), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2953, 2955), False, 'from setuptools import setup, find_packages\n'), ((449, 468), 'distutils.util.convert_path', 'convert_path', (['where'], {}), '(where)\n', (461, 468), False, 'from distutils.util import convert_path\n'), ((603, 628), 'os.path.join', 'os.path.join', (['where', 'name'], {}), '(where, name)\n', (615, 628), False, 'import os\n'), ((644, 661), 'os.path.isdir', 'os.path.isdir', (['fn'], {}), '(fn)\n', (657, 661), False, 'import os\n'), ((1020, 1051), 'os.path.join', 'os.path.join', (['fn', '"""__init__.py"""'], {}), "(fn, '__init__.py')\n", (1032, 1051), False, 'import os\n'), ((772, 798), 'fnmatch.fnmatchcase', 'fnmatchcase', (['name', 'pattern'], {}), '(name, pattern)\n', (783, 798), False, 'from fnmatch import fnmatchcase\n'), ((1484, 1510), 'fnmatch.fnmatchcase', 'fnmatchcase', (['name', 'pattern'], {}), '(name, pattern)\n', (1495, 1510), False, 'from fnmatch import fnmatchcase\n')]
|
import math
import zlib
from time import sleep
import struct
from timeout_decorator import timeout
from timeout_decorator.timeout_decorator import TimeoutError
from pycrc.algorithms import Crc
from .ISPChip import ISPChip
NXPReturnCodes = {
"CMD_SUCCESS" : 0x0,
"INVALID_COMMAND" : 0x1,
"SRC_ADDR_ERROR" : 0x2,
"DST_ADDR_ERROR" : 0x3,
"SRC_ADDR_NOT_MAPPED" : 0x4,
"DST_ADDR_NOT_MAPPED" : 0x5,
"COUNT_ERROR" : 0x6,
"INVALID_SECTOR/INVALID_PAGE" : 0x7,
"SECTOR_NOT_BLANK" : 0x8,
"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION" : 0x9,
"COMPARE_ERROR" : 0xa,
"BUSY" : 0xb,
"PARAM_ERROR" : 0xc,
"ADDR_ERROR" : 0xd,
"ADDR_NOT_MAPPED" : 0xe,
"CMD_LOCKED" : 0xf,
"INVALID_CODE" : 0x10,
"INVALID_BAUD_RATE" : 0x11,
"INVALID_STOP_BIT" : 0x12,
"CODE_READ_PROTECTION_ENABLED" : 0x13,
"Unused 1" : 0x14,
"USER_CODE_CHECKSUM" : 0x15,
"Unused 2" : 0x16,
"EFRO_NO_POWER" : 0x17,
"FLASH_NO_POWER" : 0x18,
"Unused 3" : 0x19,
"Unused 4" : 0x1a,
"FLASH_NO_CLOCK" : 0x1b,
"REINVOKE_ISP_CONFIG" : 0x1c,
"NO_VALID_IMAGE" : 0x1d,
"FAIM_NO_POWER" : 0x1e,
"FAIM_NO_CLOCK" : 0x1f,
"NoStatusResponse" : 0xff,
}
def GetErrorCodeName(code: int) -> str:
code = int(code)
for item in NXPReturnCodes.items():
if code == item[1]:
return item[0]
return "Not Found"
def RaiseReturnCodeError(code: int, call_name: str) -> None:
if int(code) != NXPReturnCodes["CMD_SUCCESS"]:
raise UserWarning(
"Return Code Failure in {} {} {}".format(call_name, GetErrorCodeName(code), code))
def RemoveBootableCheckSum(vector_table_loc: int, image: bytes):
kuint32_t_size = 4
MakeBootable(vector_table_loc, image)
image_list = list(image)
for byte in range(kuint32_t_size):
image_list[vector_table_loc * kuint32_t_size + byte] = 0
return bytes(image_list)
# 2s compliment of checksum
def CalculateCheckSum(frame) -> int:
csum = 0
for entry in frame:
csum += entry
return (1<<32) - (csum % (1<<32))
def Crc32(frame) -> int:
#CRC32
polynomial = 0x104c11db6
crc = Crc(width=32, poly=polynomial, reflect_in=True,
xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00)
crc_calc = crc.bit_by_bit(frame)
return crc_calc
def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes:
# make this a valid image by inserting a checksum in the correct place
vector_table_size = 8
kuint32_t_size = 4
# Make byte array into list of little endian 32 bit words
intvecs = struct.unpack("<%dI"%vector_table_size,
orig_image[:vector_table_size * kuint32_t_size])
# calculate the checksum over the interrupt vectors
intvecs_list = list(intvecs[:vector_table_size])
intvecs_list[vector_table_loc] = 0 # clear csum value
csum = CalculateCheckSum(intvecs_list)
intvecs_list[vector_table_loc] = csum
vector_table_bytes = b''
for vecval in intvecs_list:
vector_table_bytes += struct.pack("<I", vecval)
return vector_table_bytes
def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes:
vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image)
image = vector_table_bytes + orig_image[len(vector_table_bytes):]
return image
def FillDataToFitSector(data: bytes, size: int) -> bytes:
if len(data) != size:
data += bytes([0xff] *(size - len(data)))
return data
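# Small self-check of the vector-table checksum logic above (the vector values are
# arbitrary): after MakeBootable patches word 7, the first eight little-endian words
# of the image sum to zero modulo 2**32, which is what the LPC boot ROM verifies.
def _checksum_self_test():
    vectors = [0x100, 0x200, 0x300, 0x400, 0x500, 0x600, 0x700, 0x0]
    image = struct.pack("<8I", *vectors) + bytes(32)
    patched = MakeBootable(7, image)
    assert sum(struct.unpack("<8I", patched[:32])) % (1 << 32) == 0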
class NXPChip(ISPChip):
kWordSize = 4
kPageSizeBytes = 64
SectorSizePages = 16
MaxByteTransfer = 1024
StatusRespLength = len(ISPChip.kNewLine) + 1
#Parity = None
#DataBits = 8
#StopBits = 1
SyncString = "Synchronized"+ISPChip.kNewLine
SyncStringBytes = bytes(SyncString, encoding="utf-8")
SyncVerified = bytes("OK"+ISPChip.kNewLine, encoding="utf-8")
ReturnCodes = NXPReturnCodes
CRCLocation = 0x000002fc
CRCValues = {
"NO_ISP": 0x4e697370,
"CRP1" : 0x12345678,
"CRP2" : 0x87654321,
"CRP3" : 0x43218765,
}
kSleepTime = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.CrystalFrequency = 12000  # kHz == 12 MHz
self.SectorCount = 0
self.RAMSize = 0
self.RAMRange = [0, 0]
self.FlashRange = [0, 0]
self.RAMStartWrite = 0
self.kCheckSumLocation = 7 #0x0000001c
def FlashAddressLegal(self, address):
        return address >= self.FlashRange[0] and address <= self.FlashRange[1]
def FlashRangeLegal(self, address, length):
print(self.FlashRange, address, length)
return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0
def RamAddressLegal(self, address):
return address >= self.RAMRange[0] and address <= self.RAMRange[1]
def RamRangeLegal(self, address, length):
return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0
def GetReturnCode(self) -> int:
for _ in range(10):
#sleep(.1)
try:
resp = self.ReadLine().strip()
return int(resp)
except ValueError:
pass
return self.ReturnCodes["NoStatusResponse"]
def AssertReturnCode(self, call_name: str) -> None:
'''
Get a return code with no response
'''
code = self.GetReturnCode()
RaiseReturnCodeError(code, call_name)
def Write(self, string : bytes) -> None:
#print(out)
assert(type(string) is bytes)
self.WriteSerial(string)
#self.WriteSerial(bytes(self.kNewLine, encoding = "utf-8"))
'''
Takes the command string, return the response code
'''
def WriteCommand(self, command_string: str) -> int:
self.Write(bytes(command_string + self.kNewLine, encoding="utf-8"))
return self.GetReturnCode()
def Unlock(self):
'''
Enables Flash Write, Erase, & Go
'''
self.ClearBuffer()
response_code = self.WriteCommand("U 23130")
RaiseReturnCodeError(response_code, "Unlock")
def SetBaudRate(self, baud_rate: int, stop_bits: int = 1):
'''
Baud Depends of FAIM config, stopbit is 1 or 2
'''
response_code = self.WriteCommand("B {} {}".format(baud_rate, stop_bits))
RaiseReturnCodeError(response_code, "Set Baudrate")
def Echo(self, on: bool = True):
'''
ISP echos host when enabled
'''
if on:
command = "A 1"
else:
command = "A 0"
response_code = self.WriteCommand(command)
RaiseReturnCodeError(response_code, "Set Echo")
def WriteToRam(self, start: int, data: bytes):
assert len(data)%self.kWordSize == 0
assert self.RamRangeLegal(start, len(data))
print("Write to RAM %d bytes"%len(data))
#while i < len(data):
# self.Write("W %d %d"%(start + i, kWordSize))
# self.AssertReturnCode("Write to RAM")#get confirmation
# self.Write(data[i:i+kWordSize])#Stream data after confirmation
# i+=kWordSize
#when transfer is complete the handler sends OK<CR><LF>
response_code = self.WriteCommand("W %d %d"%(start, len(data)))
RaiseReturnCodeError(response_code, "Write to RAM")
self.Write(data)#Stream data after confirmation
#self.Write("OK"+self.kNewLine)
try:
print(self.ReadLine())
except TimeoutError:
return
@timeout(4)
def ReadMemory(self, start: int, num_bytes: int):
assert num_bytes%self.kWordSize == 0
assert self.RamRangeLegal(start, num_bytes)
print("ReadMemory")
#self.Flush()
#self.Read()
#self.ClearBuffer()
#self.Flush()
print("R %d %d"%(start, num_bytes))
response_code = self.WriteCommand("R %d %d"%(start, num_bytes))
RaiseReturnCodeError(response_code, "Read Memory")
while len(self.data_buffer_in) < (num_bytes):
self.Read()
# Command success is sent at the end of the transferr
data = []
while self.data_buffer_in:
ch = self.data_buffer_in.popleft()
data.append(ch)
if len(data) != num_bytes:
print(data, len(data), num_bytes)
assert len(data) == num_bytes
return bytes(data)
def PrepSectorsForWrite(self, start: int, end: int):
try:
response_code = self.WriteCommand("P %d %d"%(start, end))
except Exception:
response_code = self.WriteCommand("P %d %d"%(start, end))
RaiseReturnCodeError(response_code, "Prep Sectors")
def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int):
assert self.RamRangeLegal(ram_address, num_bytes)
assert self.FlashRangeLegal(flash_address, num_bytes)
response_code = self.WriteCommand("C %d %d %d"%(flash_address, ram_address, num_bytes))
RaiseReturnCodeError(response_code, "Copy RAM To Flash")
#sleep(.2)
def Go(self, address: int, thumb_mode: bool = False):
'''
Start executing code at the specified spot
'''
mode = ""
if thumb_mode:
mode = 'T'
response_code = self.WriteCommand("G %d %s"%(address, mode))
RaiseReturnCodeError(response_code, "Go")
def EraseSector(self, start: int, end: int):
response_code = self.WriteCommand("E %d %d"%(start, end))
RaiseReturnCodeError(response_code, "Erase Sectors")
def ErasePages(self, start: int, end: int):
response_code = self.WriteCommand("X %d %d"%(start, end))
RaiseReturnCodeError(response_code, "Erase Pages")
def CheckSectorsBlank(self, start: int, end: int) -> bool:
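        # SECTOR_NOT_BLANK is an expected outcome here rather than an error; only CMD_SUCCESS means the range is blank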
assert start <= end
response_code = self.WriteCommand("I %d %d"%(start, end))
try:
self.ReadLine()
response = self.ReadLine().strip()
print("Check Sectors Blank response", response)
except TimeoutError:
pass
if response_code not in (NXPReturnCodes["CMD_SUCCESS"], NXPReturnCodes["SECTOR_NOT_BLANK"]):
RaiseReturnCodeError(response_code, "Blank Check Sectors")
return response_code == NXPReturnCodes["CMD_SUCCESS"]
def ReadPartID(self):
response_code = self.WriteCommand("J")
RaiseReturnCodeError(response_code, "Read Part ID")
resp = self.ReadLine()
return int(resp)
def ReadBootCodeVersion(self):
'''
LPC84x sends a 0x1a first for some reason.
Also the boot version seems to be Minor then Major not like the docs say
'''
response_code = self.WriteCommand("K")
RaiseReturnCodeError(response_code, "Read Bootcode Version")
minor = self.ReadLine().strip()
major = self.ReadLine().strip()
return "%d.%d"%(int(major), int(minor))
'''
Checks to see if two sections in the memory map are equal
'''
def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int):
self.Write(bytes(("M %d %d %d"%(address1, address2, num_bytes) + self.kNewLine), encoding="utf-8"))
response = self.ReadLine()
response_code = int(response[0])
if response_code not in (NXPReturnCodes["CMD_SUCCESS"], NXPReturnCodes["COMPARE_ERROR"]):
RaiseReturnCodeError(response_code, "Compare")
return response_code == NXPReturnCodes["CMD_SUCCESS"]
def ReadUID(self):
response_code = self.WriteCommand("N")
RaiseReturnCodeError(response_code, "Read UID")
uuids = [
self.ReadLine().strip(),
self.ReadLine().strip(),
self.ReadLine().strip(),
self.ReadLine().strip()]
return " ".join(["0x%08x"%int(uid) for uid in uuids])
def ReadCRC(self, address: int, num_bytes: int) -> int:
try:
response_code = self.WriteCommand("S %d %d"%(address, num_bytes))
except TimeoutError:
response_code = self.WriteCommand("S %d %d"%(address, num_bytes))
RaiseReturnCodeError(response_code, "Read CRC")
return int(self.ReadLine().strip())
def ReadFlashSig(self, start: int, end: int, wait_states: int = 2, mode: int = 0) -> str:
assert start < end
assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end))
response_code = self.WriteCommand("Z %d %d %d %d"%(start, end, wait_states, mode))
RaiseReturnCodeError(response_code, "Read Flash Signature")
sig = []
for i in range(4):
sig.append(self.ReadLine().strip())
return sig
def ReadWriteFAIM(self):
response_code = self.WriteCommand("O")
RaiseReturnCodeError(response_code, "Read Write FAIM")
def ResetSerialConnection(self):
self.Flush()
self.Write(bytes(self.kNewLine, encoding="utf-8"))
try:
self.ReadLine()
except TimeoutError:
pass
def InitConnection(self):
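        # Try a fresh autobaud sync first; if that fails, assume the ISP is already running and just reattach to it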
self.ResetSerialConnection()
try:
try:
self.SyncConnection()
self.SetCrystalFrequency(self.CrystalFrequency)
except (UserWarning, TimeoutError) as w:
print("Sync Failed", w)
print("Connect to running ISP")
self.ClearSerialConnection()
self.Echo(False)
try:
self.ReadLine()
self.Flush()
self.ClearBuffer()
except TimeoutError:
pass
uid = self.ReadUID()
print("Part UID: %s"%uid)
boot_code_version = self.ReadBootCodeVersion()
print("Boot Code Version: %s"%boot_code_version)
self.SetBaudRate(self.baud_rate)
print("Baudrate set to %d"%self.baud_rate)
except Exception as e:
print(e, type(e))
raise
def SyncConnection(self):
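        # Autobaud handshake: send '?' until the ISP replies "Synchronized", echo it back and expect "OK"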
synced = False
self.ClearSerialConnection()
self.Flush()
for i in range(5):
self.Write(bytes('?'*15, encoding="utf-8"))
#self.Write('?' + self.kNewLine)
try:
frame_in = self.ReadLine()
if self.SyncString.strip() in frame_in.strip():
synced = True
break
except TimeoutError:
pass
if not synced:
#Check for SyncString
raise UserWarning("Syncronization Failure")
#self.Flush()
self.Write(self.SyncStringBytes)#echo SyncString
try:
frame_in = self.ReadLine()#discard echo
except TimeoutError:
pass
verified = False
for i in range(3):
try:
frame_in = self.ReadLine()#Should be OK\r\n
if self.SyncVerified.strip() in frame_in:
verified = True
break
except TimeoutError:
pass
if not verified:
raise UserWarning("Verification Failure")
print("Syncronization Successful")
def ClearSerialConnection(self):
self.Write(bytes(self.kNewLine, encoding="utf-8"))
self.ClearBuffer()
self.Flush()
self.Read()
self.ClearBuffer()
self.Flush()
for _ in range(2):
try:
self.ReadLine()
except TimeoutError:
pass
def SetCrystalFrequency(self, frequency_khz: int):
self.Write((bytes("%d"%frequency_khz + self.kNewLine, encoding="utf-8")))
verified = False
for i in range(3):
try:
frame_in = self.ReadLine()#Should be OK\r\n
if self.SyncVerified.strip() in frame_in:
verified = True
break
except TimeoutError:
pass
if not verified:
raise UserWarning("Verification Failure")
def CheckFlashWrite(self, data, flash_address: int) -> bool:
'''
Read Memory and compare it to what was written
'''
data_read = self.ReadMemory(flash_address, len(data))
if len(data) != len(data_read):
raise ValueError("Read Memory received incorrect amount of data")
        if not isinstance(data_read, type(data)):
raise TypeError("data written and data read are of different types")
return data == data_read
def WriteFlashSector(self, sector: int, data: bytes):
ram_address = self.RAMStartWrite
sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages
flash_address = self.FlashRange[0] + sector*sector_size_bytes
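        # Stage the sector image in RAM, verify it by CRC, then erase the flash sector and copy RAM to flash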
print("\nWriting Sector: %d\nFlash Address: %x\nRAM Address: %x\n"%(sector, flash_address, ram_address))
assert len(data) == sector_size_bytes
#data += bytes(sector_size_bytes - len(data))
data_crc = zlib.crc32(data, 0)
try:
ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))
except Exception:
ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))
while ram_crc != data_crc:
sleep(self.kSleepTime)
self.WriteToRam(ram_address, data)
sleep(self.kSleepTime)
ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))
if data_crc != ram_crc:
print("CRC Check failed", data_crc, ram_crc)
else:
break
# Check to see if sector is already equal to RAM, if so skip
        try:
            if self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes):
                print("Flash already equal to RAM, skipping write")
                return
        except Exception:
            pass
print("Prep Sector")
self.PrepSectorsForWrite(sector, sector)
sleep(self.kSleepTime)
print("Erase Sector")
self.EraseSector(sector, sector)
sleep(self.kSleepTime)
assert self.CheckSectorsBlank(sector, sector)
sleep(self.kSleepTime)
print("Prep Sector")
sector_blank = self.CheckSectorsBlank(sector, sector)
assert sector_blank
sleep(self.kSleepTime)
self.PrepSectorsForWrite(sector, sector)
sleep(self.kSleepTime)
print("Write to Flash")
self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes)
sleep(self.kSleepTime)
flash_crc = self.ReadCRC(flash_address, num_bytes=len(data))
assert flash_crc == data_crc
assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes)
def WriteSector(self, sector: int, data: bytes):
#assert data
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert(len(data) > 0)
filled_data = FillDataToFitSector(data, sector_bytes)
self.WriteFlashSector(sector, filled_data)
sleep(self.kSleepTime)
#assert self.ReadSector(sector) == data_chunk
def WriteBinaryToFlash(self, image_file: str, start_sector: int):
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert sector_bytes%self.kWordSize == 0
with open(image_file, 'rb') as f:
prog = f.read()
image = prog
print("Program Length:", len(prog))
sector_count = int(math.ceil(len(prog)/sector_bytes))
assert start_sector + sector_count <= self.SectorCount
self.Unlock()
for sector in reversed(range(start_sector, start_sector + sector_count)):
print("\nWriting Sector %d"%sector)
data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1) * sector_bytes]
self.WriteSector(sector, data_chunk)
sleep(1)
chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1])
print("Flash Signature: %s"%chip_flash_sig)
print("Programming Complete.")
def WriteImage(self, image_file: str):
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert sector_bytes%self.kWordSize == 0
#make not bootable
self.Unlock()
self.WriteSector(0, bytes([0xde]*sector_bytes))
with open(image_file, 'rb') as f:
prog = f.read()
#image = RemoveBootableCheckSum(self.kCheckSumLocation, prog)
image = MakeBootable(self.kCheckSumLocation, prog)
print("Program Length:", len(prog))
sector_count = int(math.ceil(len(prog)/sector_bytes))
assert sector_count <= self.SectorCount
for sector in reversed(range(sector_count)):
print("\nWriting Sector %d"%sector)
data_chunk = image[sector * sector_bytes : (sector + 1) * sector_bytes]
self.WriteSector(sector, data_chunk)
chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1])
print("Flash Signature: %s"%chip_flash_sig)
print("Programming Complete.")
def FindFirstBlankSector(self) -> int:
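        # Return the first sector from which the rest of flash is blank, i.e. the end of the programmed image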
for sector in range(self.SectorCount):
if self.CheckSectorsBlank(sector, self.SectorCount - 1):
return sector
return self.SectorCount - 1
def ReadSector(self, sector: int) -> bytes:
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert sector_bytes%self.kWordSize == 0
return self.ReadMemory(sector*sector_bytes, sector_bytes)
def ReadImage(self, image_file: str):
blank_sector = self.FindFirstBlankSector()
with open(image_file, 'wb') as f:
for sector in range(blank_sector):
print("Sector ", sector)
f.write(self.ReadSector(sector))
def MassErase(self):
last_sector = self.SectorCount - 1
sleep(1)
self.ClearBuffer()
self.Unlock()
self.PrepSectorsForWrite(0, last_sector)
self.EraseSector(0, last_sector)
print("Checking Sectors are blank")
assert self.CheckSectorsBlank(0, last_sector)
|
[
"struct.unpack",
"pycrc.algorithms.Crc",
"struct.pack",
"time.sleep",
"timeout_decorator.timeout",
"zlib.crc32"
] |
[((3023, 3125), 'pycrc.algorithms.Crc', 'Crc', ([], {'width': '(32)', 'poly': 'polynomial', 'reflect_in': '(True)', 'xor_in': '((1 << 32) - 1)', 'reflect_out': '(True)', 'xor_out': '(0)'}), '(width=32, poly=polynomial, reflect_in=True, xor_in=(1 << 32) - 1,\n reflect_out=True, xor_out=0)\n', (3026, 3125), False, 'from pycrc.algorithms import Crc\n'), ((3475, 3569), 'struct.unpack', 'struct.unpack', (["('<%dI' % vector_table_size)", 'orig_image[:vector_table_size * kuint32_t_size]'], {}), "('<%dI' % vector_table_size, orig_image[:vector_table_size *\n kuint32_t_size])\n", (3488, 3569), False, 'import struct\n'), ((8659, 8669), 'timeout_decorator.timeout', 'timeout', (['(4)'], {}), '(4)\n', (8666, 8669), False, 'from timeout_decorator import timeout\n'), ((3936, 3961), 'struct.pack', 'struct.pack', (['"""<I"""', 'vecval'], {}), "('<I', vecval)\n", (3947, 3961), False, 'import struct\n'), ((18180, 18199), 'zlib.crc32', 'zlib.crc32', (['data', '(0)'], {}), '(data, 0)\n', (18190, 18199), False, 'import zlib\n'), ((19108, 19130), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (19113, 19130), False, 'from time import sleep\n'), ((19210, 19232), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (19215, 19232), False, 'from time import sleep\n'), ((19295, 19317), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (19300, 19317), False, 'from time import sleep\n'), ((19446, 19468), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (19451, 19468), False, 'from time import sleep\n'), ((19526, 19548), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (19531, 19548), False, 'from time import sleep\n'), ((19664, 19686), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (19669, 19686), False, 'from time import sleep\n'), ((20173, 20195), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (20178, 20195), False, 'from time import sleep\n'), ((21055, 21063), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (21060, 21063), False, 'from time import sleep\n'), ((23103, 23111), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (23108, 23111), False, 'from time import sleep\n'), ((18425, 18447), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (18430, 18447), False, 'from time import sleep\n'), ((18507, 18529), 'time.sleep', 'sleep', (['self.kSleepTime'], {}), '(self.kSleepTime)\n', (18512, 18529), False, 'from time import sleep\n')]
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MozSpace'
db.create_table('mozspaces_mozspace', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('address', self.gf('django.db.models.fields.CharField')(max_length=300)),
('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=100)),
('country', self.gf('django.db.models.fields.CharField')(max_length=5)),
('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)),
('lon', self.gf('django.db.models.fields.FloatField')()),
('lat', self.gf('django.db.models.fields.FloatField')()),
('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)),
('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])),
))
db.send_create_signal('mozspaces', ['MozSpace'])
# Adding model 'Keyword'
db.create_table('mozspaces_keyword', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])),
))
db.send_create_signal('mozspaces', ['Keyword'])
# Adding model 'Photo'
db.create_table('mozspaces_photo', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])),
))
db.send_create_signal('mozspaces', ['Photo'])
def backwards(self, orm):
# Deleting model 'MozSpace'
db.delete_table('mozspaces_mozspace')
# Deleting model 'Keyword'
db.delete_table('mozspaces_keyword')
# Deleting model 'Photo'
db.delete_table('mozspaces_photo')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mozspaces.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keywords'", 'to': "orm['mozspaces.MozSpace']"})
},
'mozspaces.mozspace': {
'Meta': {'object_name': 'MozSpace'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'featured_mozspace'", 'null': 'True', 'to': "orm['mozspaces.Photo']"}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'extra_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mozspaces.photo': {
'Meta': {'object_name': 'Photo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'to': "orm['mozspaces.MozSpace']"}),
'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['mozspaces']
|
[
"south.db.db.delete_table",
"south.db.db.send_create_signal"
] |
[((1650, 1698), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""mozspaces"""', "['MozSpace']"], {}), "('mozspaces', ['MozSpace'])\n", (1671, 1698), False, 'from south.db import db\n'), ((2118, 2165), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""mozspaces"""', "['Keyword']"], {}), "('mozspaces', ['Keyword'])\n", (2139, 2165), False, 'from south.db import db\n'), ((2576, 2621), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""mozspaces"""', "['Photo']"], {}), "('mozspaces', ['Photo'])\n", (2597, 2621), False, 'from south.db import db\n'), ((2707, 2744), 'south.db.db.delete_table', 'db.delete_table', (['"""mozspaces_mozspace"""'], {}), "('mozspaces_mozspace')\n", (2722, 2744), False, 'from south.db import db\n'), ((2789, 2825), 'south.db.db.delete_table', 'db.delete_table', (['"""mozspaces_keyword"""'], {}), "('mozspaces_keyword')\n", (2804, 2825), False, 'from south.db import db\n'), ((2868, 2902), 'south.db.db.delete_table', 'db.delete_table', (['"""mozspaces_photo"""'], {}), "('mozspaces_photo')\n", (2883, 2902), False, 'from south.db import db\n')]
|
"""One-off functions"""
import urllib.parse, random, string
def load_yaml(path):
import yaml
try:
with open(path, 'r') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.FullLoader)
return data
except FileNotFoundError:
        raise FileNotFoundError(f'could not load yaml at path: {path}')
except Exception as e:
raise e
def parse_body(body):
body = urllib.parse.parse_qs(body)
for k, v in body.items():
if len(v) == 1:
body.update({k: v[0]})
return body
def unquote_plus(text):
return urllib.parse.unquote_plus(text)
def parse_url_path(url_path):
reformat = url_path.replace('%2F', '/')
reformat = reformat.replace('+', ' ')
return reformat
def random_string(stringLength=8):
"""https://pynative.com/python-generate-random-string/
Args:
stringLength (int, optional): length of string. Defaults to 8.
Returns:
str: random string
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
|
[
"yaml.load",
"random.choice"
] |
[((170, 214), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.FullLoader'}), '(yaml_file, Loader=yaml.FullLoader)\n', (179, 214), False, 'import yaml\n'), ((1046, 1068), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1059, 1068), False, 'import urllib.parse, random, string\n')]
|
# This file is depreciated. Controls had to be hard coded into app.py in the translate_static function. Babel could not translate from this file.
from flask_babel import Babel, _
# Controls for webapp
station_name_options = [
{'label': _('Resolute Bay, No. W. Territories'), 'value': 'Resolute Bay, No. W. Territories'},
{'label': _('Blossom Point, Maryland'), 'value': 'Blossom Point, Maryland'},
{'label': _('South Atlantic, Falkland Islands'), 'value': 'South Atlantic, Falkland Islands'},
{'label': _("St. John's, Newfoundland"), 'value': "St. John's, Newfoundland"},
{'label': _('Orroral Valley, Australia'), 'value': 'Orroral Valley, Australia'},
{'label': _('Prince Albert, Canada'), 'value': 'Prince Albert, Canada'},
{'label': _('Ottawa, Canada'), 'value': 'Ottawa, Canada'},
{'label': _('Byrd Station, Antartica'), 'value': 'Byrd Station, Antartica'},
{'label': _('Las Palmas, Canary Island'), 'value': 'Las Palmas, Canary Island'},
{'label': _('Winkfield, England'), 'value': 'Winkfield, England'},
{'label': _('Fort Myers, Florida'), 'value': 'Fort Myers, Florida'},
{'label': _('Antofagasta, Chile'), 'value': 'Antofagasta, Chile'},
{'label': _('East Grand Forks, Minnesota'), 'value': 'East Grand Forks, Minnesota'},
{'label': _('Rosman, No. Carolina'), 'value': 'Rosman, No. Carolina'},
{'label': _('College, Fairbanks, Alaska'), 'value': 'College, Fairbanks, Alaska'},
{'label': _('Woomera, Australia'), 'value': 'Woomera, Australia'},
{'label': _('Gilmore Creek, Fairbanks, Alaska'), 'value': 'Gilmore Creek, Fairbanks, Alaska'},
{'label': _('Tromso, Norway'), 'value': 'Tromso, Norway'},
{'label': _('University of Alaska, Fairbanks, Alaska'), 'value': 'University of Alaska, Fairbanks, Alaska'},
{'label': _('Darwin, Australia'), 'value': 'Darwin, Australia'},
{'label': _('Quito, Ecuador'), 'value': 'Quito, Ecuador'},
{'label': _('South Point, Hawaiian Islands'), 'value': 'South Point, Hawaiian Islands'},
{'label': _('Lima, Peru'), 'value': 'Lima, Peru'},
{'label': _('Johannesburg, South Africa'), 'value': 'Johannesburg, South Africa'},
{'label': _('Kano, Nigeria'), 'value': 'Kano, Nigeria'},
{'label': _('Tananarive, Madagascar'), 'value': 'Tananarive, Madagascar'},
{'label': _('Bretigny, France'), 'value': 'Bretigny, France'},
{'label': _('Singapore, Malaysia'), 'value': 'Singapore, Malaysia'},
{'label': _('Boulder, Colorado'), 'value': 'Boulder, Colorado'},
{'label': _('Mojave, California'), 'value': 'Mojave, California'},
{'label': _('Kauai, Hawaii'), 'value': 'Kauai, Hawaii'},
{'label': _('Kashima, Japan'), 'value': 'Kashima, Japan'}]
# Getting only the values of the station names
station_values = []
for station in station_name_options:
station_values.append(station['value'])
x_axis_options = [
{'label': _('Date'), 'value': ('timestamp')},
{'label': _('Latitude'), 'value': ('lat')},
{'label': _('Longitude'), 'value': ('lon')}]
y_axis_options = [
{'label': _('Minimum Frequency'), 'value': ('fmin')},
{'label': _('Maximum Depth'), 'value': ('max_depth')}]
year_dict = {}
for year in range(1962,1974):
year_dict[year] = str(year)
lat_dict = {}
for lat in range(-90, 90+1, 15):
lat_dict[lat] = str(lat)
lon_dict = {}
for lon in range(-180, 180+1, 30):
lon_dict[lon] = str(lon)
|
[
"flask_babel._"
] |
[((242, 279), 'flask_babel._', '_', (['"""Resolute Bay, No. W. Territories"""'], {}), "('Resolute Bay, No. W. Territories')\n", (243, 279), False, 'from flask_babel import Babel, _\n'), ((341, 369), 'flask_babel._', '_', (['"""Blossom Point, Maryland"""'], {}), "('Blossom Point, Maryland')\n", (342, 369), False, 'from flask_babel import Babel, _\n'), ((422, 459), 'flask_babel._', '_', (['"""South Atlantic, Falkland Islands"""'], {}), "('South Atlantic, Falkland Islands')\n", (423, 459), False, 'from flask_babel import Babel, _\n'), ((521, 550), 'flask_babel._', '_', (['"""St. John\'s, Newfoundland"""'], {}), '("St. John\'s, Newfoundland")\n', (522, 550), False, 'from flask_babel import Babel, _\n'), ((604, 634), 'flask_babel._', '_', (['"""Orroral Valley, Australia"""'], {}), "('Orroral Valley, Australia')\n", (605, 634), False, 'from flask_babel import Babel, _\n'), ((689, 715), 'flask_babel._', '_', (['"""Prince Albert, Canada"""'], {}), "('Prince Albert, Canada')\n", (690, 715), False, 'from flask_babel import Babel, _\n'), ((766, 785), 'flask_babel._', '_', (['"""Ottawa, Canada"""'], {}), "('Ottawa, Canada')\n", (767, 785), False, 'from flask_babel import Babel, _\n'), ((829, 857), 'flask_babel._', '_', (['"""Byrd Station, Antartica"""'], {}), "('Byrd Station, Antartica')\n", (830, 857), False, 'from flask_babel import Babel, _\n'), ((910, 940), 'flask_babel._', '_', (['"""Las Palmas, Canary Island"""'], {}), "('Las Palmas, Canary Island')\n", (911, 940), False, 'from flask_babel import Babel, _\n'), ((995, 1018), 'flask_babel._', '_', (['"""Winkfield, England"""'], {}), "('Winkfield, England')\n", (996, 1018), False, 'from flask_babel import Babel, _\n'), ((1066, 1090), 'flask_babel._', '_', (['"""Fort Myers, Florida"""'], {}), "('Fort Myers, Florida')\n", (1067, 1090), False, 'from flask_babel import Babel, _\n'), ((1139, 1162), 'flask_babel._', '_', (['"""Antofagasta, Chile"""'], {}), "('Antofagasta, Chile')\n", (1140, 1162), False, 'from flask_babel import Babel, _\n'), ((1210, 1242), 'flask_babel._', '_', (['"""East Grand Forks, Minnesota"""'], {}), "('East Grand Forks, Minnesota')\n", (1211, 1242), False, 'from flask_babel import Babel, _\n'), ((1299, 1324), 'flask_babel._', '_', (['"""Rosman, No. Carolina"""'], {}), "('Rosman, No. Carolina')\n", (1300, 1324), False, 'from flask_babel import Babel, _\n'), ((1374, 1405), 'flask_babel._', '_', (['"""College, Fairbanks, Alaska"""'], {}), "('College, Fairbanks, Alaska')\n", (1375, 1405), False, 'from flask_babel import Babel, _\n'), ((1461, 1484), 'flask_babel._', '_', (['"""Woomera, Australia"""'], {}), "('Woomera, Australia')\n", (1462, 1484), False, 'from flask_babel import Babel, _\n'), ((1532, 1569), 'flask_babel._', '_', (['"""Gilmore Creek, Fairbanks, Alaska"""'], {}), "('Gilmore Creek, Fairbanks, Alaska')\n", (1533, 1569), False, 'from flask_babel import Babel, _\n'), ((1631, 1650), 'flask_babel._', '_', (['"""Tromso, Norway"""'], {}), "('Tromso, Norway')\n", (1632, 1650), False, 'from flask_babel import Babel, _\n'), ((1694, 1738), 'flask_babel._', '_', (['"""University of Alaska, Fairbanks, Alaska"""'], {}), "('University of Alaska, Fairbanks, Alaska')\n", (1695, 1738), False, 'from flask_babel import Babel, _\n'), ((1807, 1829), 'flask_babel._', '_', (['"""Darwin, Australia"""'], {}), "('Darwin, Australia')\n", (1808, 1829), False, 'from flask_babel import Babel, _\n'), ((1876, 1895), 'flask_babel._', '_', (['"""Quito, Ecuador"""'], {}), "('Quito, Ecuador')\n", (1877, 1895), False, 'from flask_babel import Babel, _\n'), ((1939, 1973), 'flask_babel._', '_', (['"""South Point, Hawaiian Islands"""'], {}), "('South Point, Hawaiian Islands')\n", (1940, 1973), False, 'from flask_babel import Babel, _\n'), ((2032, 2047), 'flask_babel._', '_', (['"""Lima, Peru"""'], {}), "('Lima, Peru')\n", (2033, 2047), False, 'from flask_babel import Babel, _\n'), ((2087, 2118), 'flask_babel._', '_', (['"""Johannesburg, South Africa"""'], {}), "('Johannesburg, South Africa')\n", (2088, 2118), False, 'from flask_babel import Babel, _\n'), ((2174, 2192), 'flask_babel._', '_', (['"""Kano, Nigeria"""'], {}), "('Kano, Nigeria')\n", (2175, 2192), False, 'from flask_babel import Babel, _\n'), ((2235, 2262), 'flask_babel._', '_', (['"""Tananarive, Madagascar"""'], {}), "('Tananarive, Madagascar')\n", (2236, 2262), False, 'from flask_babel import Babel, _\n'), ((2314, 2335), 'flask_babel._', '_', (['"""Bretigny, France"""'], {}), "('Bretigny, France')\n", (2315, 2335), False, 'from flask_babel import Babel, _\n'), ((2381, 2405), 'flask_babel._', '_', (['"""Singapore, Malaysia"""'], {}), "('Singapore, Malaysia')\n", (2382, 2405), False, 'from flask_babel import Babel, _\n'), ((2454, 2476), 'flask_babel._', '_', (['"""Boulder, Colorado"""'], {}), "('Boulder, Colorado')\n", (2455, 2476), False, 'from flask_babel import Babel, _\n'), ((2523, 2546), 'flask_babel._', '_', (['"""Mojave, California"""'], {}), "('Mojave, California')\n", (2524, 2546), False, 'from flask_babel import Babel, _\n'), ((2594, 2612), 'flask_babel._', '_', (['"""Kauai, Hawaii"""'], {}), "('Kauai, Hawaii')\n", (2595, 2612), False, 'from flask_babel import Babel, _\n'), ((2655, 2674), 'flask_babel._', '_', (['"""Kashima, Japan"""'], {}), "('Kashima, Japan')\n", (2656, 2674), False, 'from flask_babel import Babel, _\n'), ((2887, 2896), 'flask_babel._', '_', (['"""Date"""'], {}), "('Date')\n", (2888, 2896), False, 'from flask_babel import Babel, _\n'), ((2937, 2950), 'flask_babel._', '_', (['"""Latitude"""'], {}), "('Latitude')\n", (2938, 2950), False, 'from flask_babel import Babel, _\n'), ((2985, 2999), 'flask_babel._', '_', (['"""Longitude"""'], {}), "('Longitude')\n", (2986, 2999), False, 'from flask_babel import Babel, _\n'), ((3054, 3076), 'flask_babel._', '_', (['"""Minimum Frequency"""'], {}), "('Minimum Frequency')\n", (3055, 3076), False, 'from flask_babel import Babel, _\n'), ((3112, 3130), 'flask_babel._', '_', (['"""Maximum Depth"""'], {}), "('Maximum Depth')\n", (3113, 3130), False, 'from flask_babel import Babel, _\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: command.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcommand.proto\x12\tcommander\"U\n\x03\x43md\x12\x1f\n\x03\x63md\x18\x01 \x01(\x0e\x32\x12.commander.CmdType\x12\x0c\n\x04ints\x18\x02 \x03(\x05\x12\x0e\n\x06\x66loats\x18\x03 \x03(\x02\x12\x0f\n\x07strings\x18\x04 \x03(\t*\xe3\x01\n\x07\x43mdType\x12\x0e\n\nRAWKEYDOWN\x10\x00\x12\x0c\n\x08RAWKEYUP\x10\x01\x12\x0c\n\x08\x43OMMAND2\x10\x02\x12\x0c\n\x08\x43OMMAND3\x10\x03\x12\x0c\n\x08\x43OMMAND4\x10\x04\x12\x0c\n\x08\x43OMMAND5\x10\x05\x12\x0c\n\x08\x43OMMAND6\x10\x06\x12\x0c\n\x08\x43OMMAND7\x10\x07\x12\x0c\n\x08\x43OMMAND8\x10\x08\x12\x0c\n\x08\x43OMMAND9\x10\t\x12\x0f\n\x0bMAPORIGINAL\x10\n\x12\x07\n\x03MAP\x10\x0b\x12\x07\n\x03\x41\x43K\x10\x0c\x12\x08\n\x04\x41\x43K2\x10\r\x12\x08\n\x04HERO\x10\x0e\x12\t\n\x05READY\x10\x0f\x12\x08\n\x04INIT\x10\x10\x62\x06proto3')
_CMDTYPE = DESCRIPTOR.enum_types_by_name['CmdType']
CmdType = enum_type_wrapper.EnumTypeWrapper(_CMDTYPE)
RAWKEYDOWN = 0
RAWKEYUP = 1
COMMAND2 = 2
COMMAND3 = 3
COMMAND4 = 4
COMMAND5 = 5
COMMAND6 = 6
COMMAND7 = 7
COMMAND8 = 8
COMMAND9 = 9
MAPORIGINAL = 10
MAP = 11
ACK = 12
ACK2 = 13
HERO = 14
READY = 15
INIT = 16
_CMD = DESCRIPTOR.message_types_by_name['Cmd']
Cmd = _reflection.GeneratedProtocolMessageType('Cmd', (_message.Message,), {
'DESCRIPTOR' : _CMD,
'__module__' : 'command_pb2'
# @@protoc_insertion_point(class_scope:commander.Cmd)
})
_sym_db.RegisterMessage(Cmd)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_CMDTYPE._serialized_start=116
_CMDTYPE._serialized_end=343
_CMD._serialized_start=28
_CMD._serialized_end=113
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper",
"google.protobuf.descriptor_pool.Default",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] |
[((531, 557), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (555, 557), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1468, 1511), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', (['_CMDTYPE'], {}), '(_CMDTYPE)\n', (1501, 1511), False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((1775, 1899), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""Cmd"""', '(_message.Message,)', "{'DESCRIPTOR': _CMD, '__module__': 'command_pb2'}"], {}), "('Cmd', (_message.Message,), {\n 'DESCRIPTOR': _CMD, '__module__': 'command_pb2'})\n", (1815, 1899), True, 'from google.protobuf import reflection as _reflection\n'), ((575, 601), 'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ([], {}), '()\n', (599, 601), True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')]
|
import argparse
import sys
from frankapy import FrankaArm
from frankapy import FrankaConstants as FC
def wait_for_enter():
if sys.version_info[0] < 3:
raw_input('Press Enter to continue:')
else:
input('Press Enter to continue:')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--time', '-t', type=float, default=10)
parser.add_argument('--open_gripper', '-o', action='store_true')
args = parser.parse_args()
print('Starting robot')
fa = FrankaArm()
if args.open_gripper:
fa.open_gripper()
print('Be very careful!! Make sure the robot can safely move to HOME JOINTS Position.')
wait_for_enter()
fa.reset_joints()
print('Using default joint impedances to move back and forth.')
wait_for_enter()
fa.goto_joints(FC.READY_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)
fa.goto_joints(FC.HOME_JOINTS)
print('Now using different joint impedances to move back and forth.')
wait_for_enter()
fa.goto_joints(FC.READY_JOINTS, joint_impedances=[1500, 1500, 1500, 1250, 1250, 1000, 1000])
fa.goto_joints(FC.HOME_JOINTS)
print('Remember to reset the joint_impedances to defaults.')
fa.goto_joints(FC.HOME_JOINTS, joint_impedances=FC.DEFAULT_JOINT_IMPEDANCES)
|
[
"argparse.ArgumentParser",
"frankapy.FrankaArm"
] |
[((295, 320), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (318, 320), False, 'import argparse\n'), ((523, 534), 'frankapy.FrankaArm', 'FrankaArm', ([], {}), '()\n', (532, 534), False, 'from frankapy import FrankaArm\n')]
|
import hashlib
import json
import logging
from collections.abc import Mapping, Sequence
from typing import Any, List, Tuple
from nested_lookup import nested_lookup
from ordered_set import OrderedSet
from .pointer import fragment_decode, fragment_encode
LOG = logging.getLogger(__name__)
NON_MERGABLE_KEYS = ("uniqueItems", "insertionOrder")
TYPE = "type"
REF = "$ref"
UNPACK_SEQUENCE_IDENTIFIER = "*"
class FlatteningError(Exception):
pass
def item_hash(
item,
): # assumption -> input is only json comparable type (dict/list/scalar)
"""MD5 hash for an item (Dictionary/Iterable/Scalar)"""
dhash = hashlib.md5() # nosec
if isinstance(item, dict):
item = {k: item_hash(v) for k, v in item.items()}
if isinstance(item, list):
        item = sorted(item_hash(i) for i in item)
encoded = json.dumps(item, sort_keys=True).encode()
dhash.update(encoded)
return dhash.hexdigest()
def to_set(value: Any) -> OrderedSet:
return (
OrderedSet(value)
if isinstance(value, (list, OrderedSet))
else OrderedSet([value])
)
class ConstraintError(FlatteningError, ValueError):
def __init__(self, message, path, *args):
self.path = fragment_encode(path)
message = message.format(*args, path=self.path)
super().__init__(message)
class BaseRefPlaceholder:
"""A sentinel object representing a reference inside the base document."""
def __repr__(self):
"""Readable representation for debugging.
>>> repr(BaseRefPlaceholder())
'<BASE>'
"""
return "<BASE>"
#: The sentinel instance representing a reference inside the base document.
BASE = BaseRefPlaceholder()
def rewrite_ref(ref):
"""Rewrite a reference to be inside of the base document. A relative JSON
pointer is returned (in URI fragment identifier representation).
If the reference is already inside the base document (:const:`BASE`), the parts
are simply encoded into a pointer.
If the reference is outside of the base document, a unique pointer inside
the base document is made by namespacing the reference under the remote base
name inside the remote section.
>>> rewrite_ref((BASE, "foo", "bar"))
'#/foo/bar'
>>> rewrite_ref((BASE,))
'#'
>>> rewrite_ref(("remote", "foo", "bar"))
'#/remote/remote/foo/bar'
>>> rewrite_ref(("remote",))
'#/remote/remote'
"""
base, *parts = ref
if base is not BASE:
parts = ["remote", base] + parts
return fragment_encode(parts)
def traverse(document, path_parts):
"""Traverse the document according to the reference.
Since the document is presumed to be the reference's base, the base is
discarded. There is no validation that the reference is valid.
:raises ValueError, LookupError: the reference is invalid for this document
>>> traverse({"foo": {"bar": [42]}}, tuple())
({'foo': {'bar': [42]}}, (), None)
>>> traverse({"foo": {"bar": [42]}}, ["foo"])
({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}})
>>> traverse({"foo": {"bar": [42]}}, ("foo", "bar"))
([42], ('foo', 'bar'), {'bar': [42]})
>>> traverse({"foo": {"bar": [42]}}, ("foo", "bar", "0"))
(42, ('foo', 'bar', 0), [42])
>>> traverse({}, ["foo"])
Traceback (most recent call last):
...
KeyError: 'foo'
>>> traverse([], ["foo"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'foo'
>>> traverse([], [0])
Traceback (most recent call last):
...
IndexError: list index out of range
"""
parent = None
path = []
for part in path_parts:
if isinstance(document, Sequence):
part = int(part)
parent = document
document = document[part]
path.append(part)
return document, tuple(path), parent
def _resolve_ref(sub_schema: dict, definitions: dict, last_step: bool = False):
# resolve $ref
ref = nested_lookup(REF, sub_schema) # should be safe (always single value)
    # because sub_schema is always per parent property
# (taken from definitions)
    if last_step and REF not in sub_schema:  # don't traverse deeper than requested
# check if $ref is used directly ->
# means that we need to check definition
# otherwise it's an array and return subschema
return sub_schema
if ref:
# [0] should be a single $ref in subschema on the top level
# [-1] $ref must follow #/definitions/object
sub_schema = definitions[fragment_decode(ref[0])[-1]]
# resolve properties
properties = nested_lookup("properties", sub_schema)
if properties:
sub_schema = properties[0]
return sub_schema
# pylint: disable=C0301
def traverse_raw_schema(schema: dict, path: tuple):
"""Traverse the raw json schema resolving $ref
:raises TypeError: either schema is not of type dict
:raises ConstraintError: the schema tries to override "type" or "$ref"
>>> traverse_raw_schema({"properties": {"bar": [42]}}, tuple())
{'bar': [42]}
>>> traverse_raw_schema({"properties": {"bar": [42]}}, ("bar",))
[42]
>>> traverse_raw_schema({"definitions": {"bar": {"type": "boolean"}},"properties": {"bar": {"$ref": "#/definitions/bar"}}}, ("bar",))
{'type': 'boolean'}
>>> traverse_raw_schema({"definitions":{"b":[1],"f":{"properties":{"b":{"$ref":"#/definitions/b"}}}},"properties":{"f":{"$ref":"#/definitions/f"}}},("f", "b")) # noqa: B950
[1]
>>> traverse_raw_schema({}, ("foo"))
{}
>>> traverse_raw_schema([], ["foo"])
Traceback (most recent call last):
...
TypeError: Schema must be a dictionary
"""
if not isinstance(schema, Mapping):
raise TypeError("Schema must be a dictionary")
try:
properties = schema["properties"]
definitions = schema.get("definitions", {})
sub_properties = properties
last_step = (
len(path) - 1
) # get amount of steps to prevent deeper traversal than requested
for step in path:
sub_properties = _resolve_ref(
sub_properties[step],
definitions,
last_step=path.index(step) == last_step,
)
return sub_properties
except KeyError as e:
LOG.debug("Malformed Schema or incorrect path provided\n%s\n%s", path, e)
return {}
def traverse_path_for_sequence_members(
document: dict, path_parts: Sequence, path: list = None
) -> Tuple[List[object], List[tuple]]:
"""Traverse the paths for all sequence members in the document according to the reference.
Since the document is presumed to be the reference's base, the base is
discarded. There is no validation that the reference is valid.
Differing from traverse, this returns a list of documents and a list of resolved paths.
:parameter document: document to traverse (dict or list)
:parameter path_parts: document paths to traverse
:parameter path: traversed path so far
:raises ValueError, LookupError: the reference is invalid for this document
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, tuple())
([{'foo': {'bar': [42, 43, 44]}}], [()])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ["foo"])
([{'bar': [42, 43, 44]}], [('foo',)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar"))
([[42, 43, 44]], [('foo', 'bar')])
>>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar", "*"))
([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*"))
([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)])
>>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*", "baz"))
([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')])
>>> traverse_path_for_sequence_members({}, ["foo"])
Traceback (most recent call last):
...
KeyError: 'foo'
>>> traverse_path_for_sequence_members([], ["foo"])
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'foo'
>>> traverse_path_for_sequence_members([], [0])
Traceback (most recent call last):
...
IndexError: list index out of range
"""
if path is None:
path = []
if not path_parts:
return [document], [tuple(path)]
path_parts = list(path_parts)
if not isinstance(document, Sequence):
return _handle_non_sequence_for_traverse(document, path_parts, path)
return _handle_sequence_for_traverse(document, path_parts, path)
def _handle_non_sequence_for_traverse(
current_document: dict, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in `traverse`:
The next path part is the first part in the list of path parts.
The new document is obtained from the current document using the new path part as the key.
The next path part is added to the traversed path.
The traversal continues by recursively calling `traverse_path_for_sequence_members`
"""
part_to_handle = current_path_parts.pop(0)
current_document = current_document[part_to_handle]
current_path.append(part_to_handle)
return traverse_path_for_sequence_members(
current_document, current_path_parts, current_path
)
def _handle_sequence_for_traverse(
current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
Check the new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse index and continue:
The new document is obtained from the current document (a sequence) using the new path part as the index.
The next path part is added to the traversed path
"""
sequence_part = current_path_parts.pop(0)
if sequence_part == UNPACK_SEQUENCE_IDENTIFIER:
return _handle_unpack_sequence_for_traverse(
current_document, current_path_parts, current_path
)
# otherwise, sequence part should be a valid index
current_sequence_part = int(sequence_part)
current_document = current_document[current_sequence_part]
current_path.append(current_sequence_part)
return [current_document], [tuple(current_path)]
def _handle_unpack_sequence_for_traverse(
current_document: Sequence, current_path_parts: list, current_path: list
) -> Tuple[List[object], List[tuple]]:
"""
When unpacking a sequence, we need to include multiple paths and multiple documents, one for each sequence member.
For each sequence member:
Append the traversed paths w/ the sequence index, and get the new document.
The new document is obtained by traversing the current document using the sequence index.
The new document is appended to the list of new documents.
For each new document:
The remaining document is traversed using the remaining path parts.
The list of traversed documents and traversed paths are returned.
"""
documents = []
resolved_paths = []
new_documents = []
new_paths = []
for sequence_index in range(len(current_document)):
new_paths.append(current_path.copy() + [sequence_index])
new_document = traverse_path_for_sequence_members(
current_document, [sequence_index] + current_path_parts, current_path.copy()
)[0]
new_documents.extend(new_document)
for i in range(len(new_documents)): # pylint: disable=consider-using-enumerate
new_document = new_documents[i]
newer_documents, newer_paths = traverse_path_for_sequence_members(
new_document, current_path_parts, new_paths[i]
)
documents.extend(newer_documents)
resolved_paths.extend(newer_paths)
return documents, resolved_paths
def schema_merge(target, src, path): # noqa: C901 # pylint: disable=R0912
"""Merges the src schema into the target schema in place.
If there are duplicate keys, src will overwrite target.
:raises TypeError: either schema is not of type dict
:raises ConstraintError: the schema tries to override "type" or "$ref"
>>> schema_merge({}, {}, ())
{}
>>> schema_merge({'foo': 'a'}, {}, ())
{'foo': 'a'}
>>> schema_merge({}, {'foo': 'a'}, ())
{'foo': 'a'}
>>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ())
{'foo': 'b'}
>>> schema_merge({'required': 'a'}, {'required': 'b'}, ())
{'required': ['a', 'b']}
>>> a, b = {'$ref': 'a'}, {'foo': 'b'}
>>> schema_merge(a, b, ('foo',))
{'$ref': 'a', 'foo': 'b'}
>>> a, b = {'$ref': 'a'}, {'type': 'b'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b'])}
>>> a, b = {'$ref': 'a'}, {'$ref': 'b'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b'])}
>>> a, b = {'$ref': 'a'}, {'type': ['b', 'c']}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'$ref': 'a'}, {'type': OrderedSet(['b', 'c'])}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'}
>>> schema_merge(a, b, ('foo',))
{'type': OrderedSet(['a', 'b', 'c'])}
>>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo': {'type': 'b'}}
>>> schema_merge(a, b, ('foo',))
{'Foo': {'type': OrderedSet(['a', 'b'])}}
>>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE
{'type': OrderedSet(['a', 'b'])}
>>> schema_merge({'type': 'string'}, {'type': 'integer'}, ())
{'type': OrderedSet(['string', 'integer'])}
"""
if not (isinstance(target, Mapping) and isinstance(src, Mapping)):
raise TypeError("Both schemas must be dictionaries")
for key, src_schema in src.items():
try:
if key in (
REF,
TYPE,
): # $ref and type are treated similarly and unified
target_schema = target.get(key) or target.get(TYPE) or target[REF]
else:
target_schema = target[key] # carry over existing properties
except KeyError:
target[key] = src_schema
else:
next_path = path + (key,)
try:
target[key] = schema_merge(target_schema, src_schema, next_path)
except TypeError:
if key in (TYPE, REF): # combining multiple $ref and types
src_set = to_set(src_schema)
try:
target[TYPE] = to_set(
target[TYPE]
) # casting to ordered set as lib
# implicitly converts strings to sets
target[TYPE] |= src_set
except (TypeError, KeyError):
target_set = to_set(target_schema)
target[TYPE] = target_set | src_set
try:
# check if there are conflicting $ref and type
# at the same sub schema. Conflicting $ref could only
# happen on combiners because method merges two json
# objects without losing any previous info:
# e.g. "oneOf": [{"$ref": "..#1.."},{"$ref": "..#2.."}] ->
# { "ref": "..#1..", "type": [{},{}] }
target.pop(REF)
except KeyError:
pass
elif key == "required":
target[key] = sorted(set(target_schema) | set(src_schema))
else:
if key in NON_MERGABLE_KEYS and target_schema != src_schema:
msg = (
"Object at path '{path}' declared multiple values "
"for '{}': found '{}' and '{}'"
)
# pylint: disable=W0707
raise ConstraintError(msg, path, key, target_schema, src_schema)
target[key] = src_schema
return target
|
[
"hashlib.md5",
"nested_lookup.nested_lookup",
"json.dumps",
"ordered_set.OrderedSet",
"logging.getLogger"
] |
[((262, 289), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (279, 289), False, 'import logging\n'), ((623, 636), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (634, 636), False, 'import hashlib\n'), ((3992, 4022), 'nested_lookup.nested_lookup', 'nested_lookup', (['REF', 'sub_schema'], {}), '(REF, sub_schema)\n', (4005, 4022), False, 'from nested_lookup import nested_lookup\n'), ((4641, 4680), 'nested_lookup.nested_lookup', 'nested_lookup', (['"""properties"""', 'sub_schema'], {}), "('properties', sub_schema)\n", (4654, 4680), False, 'from nested_lookup import nested_lookup\n'), ((989, 1006), 'ordered_set.OrderedSet', 'OrderedSet', (['value'], {}), '(value)\n', (999, 1006), False, 'from ordered_set import OrderedSet\n'), ((1069, 1088), 'ordered_set.OrderedSet', 'OrderedSet', (['[value]'], {}), '([value])\n', (1079, 1088), False, 'from ordered_set import OrderedSet\n'), ((831, 863), 'json.dumps', 'json.dumps', (['item'], {'sort_keys': '(True)'}), '(item, sort_keys=True)\n', (841, 863), False, 'import json\n')]
|
# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned
import pytest
from click.testing import CliRunner
from expecter import expect
from slackoff.cli import main
@pytest.fixture
def runner():
return CliRunner()
def describe_cli():
def describe_signout():
def it_can_force_signin(runner):
result = runner.invoke(main, ["Foobar", "--signout"])
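            # expecter overloads ==, so these bare comparisons raise AssertionError on mismatch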
expect(result.exit_code) == 0
expect(result.output) == "Currently signed out of Foobar\n"
|
[
"expecter.expect",
"click.testing.CliRunner"
] |
[((232, 243), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (241, 243), False, 'from click.testing import CliRunner\n'), ((414, 438), 'expecter.expect', 'expect', (['result.exit_code'], {}), '(result.exit_code)\n', (420, 438), False, 'from expecter import expect\n'), ((456, 477), 'expecter.expect', 'expect', (['result.output'], {}), '(result.output)\n', (462, 477), False, 'from expecter import expect\n')]
|
# coding: utf-8
import re
from inner_reuse import chunks
class Ptr(object):
"""
Contain operation data
"""
def __init__(self, pos, type_key):
self.position = pos
self.type_response = type_key
self.name_test = None
self.out = ""
def __repr__(self):
return str(dict((k, v) for k, v in self.__dict__.items() if k != 'position'))
def get_data(self):
"""
Returns:
key, value
"""
return self.type_response, (self.name_test, self.out)
def split_test_response(self, content_value):
"""
Returns:
(name, out)
"""
end_name_pos = content_value.find('\n')
if end_name_pos != -1:
self.name_test = content_value[:end_name_pos]
            # For some reason, if done on the line above it affects the whole string, not just the slice
self.name_test = self.name_test.replace('\r', '')
self.out = content_value[end_name_pos:].strip()
else:
self.name_test = content_value
def parser_out(out):
"""
Returns:
[(name, ok/fail/deadlock, out_one), ..., ]
"""
OK = "\[ OK \]"
RUN = "\[ RUN \]"
FAILED = "\[ FAILED \]"
template = []
template.extend([Ptr(a.start(), 'ok') for a in list(re.finditer(OK, out))])
template.extend([Ptr(a.start(), 'fail') for a in list(re.finditer(FAILED, out))])
template.extend([Ptr(a.end(), 'run') for a in list(re.finditer(RUN, out))])
    how_split = 2  # in pairs
response_pairs = chunks(sorted(template, key=lambda record: record.position), how_split)
result = []
for pair in response_pairs:
        head = pair[0]  # the entry with the 'run' key comes first
if head.type_response == 'run':
if len(pair) == 1:
pair.append(Ptr(out.__len__(), 'deadlock'))
bottom = pair[1]
content = out[head.position:bottom.position].strip()
bottom.split_test_response(content)
yield bottom.get_data()
def parse_out_not_gen(out):
gen = parser_out(out)
report = []
for i in gen:
report.append(i)
return report
|
[
"re.finditer"
] |
[((1327, 1347), 're.finditer', 're.finditer', (['OK', 'out'], {}), '(OK, out)\n', (1338, 1347), False, 'import re\n'), ((1409, 1433), 're.finditer', 're.finditer', (['FAILED', 'out'], {}), '(FAILED, out)\n', (1420, 1433), False, 'import re\n'), ((1492, 1513), 're.finditer', 're.finditer', (['RUN', 'out'], {}), '(RUN, out)\n', (1503, 1513), False, 'import re\n')]
|
import numpy
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
x = [1,2,3,4]
x=numpy.array(x)
print(x.shape)
x=x.reshape(2,-1)
print(x.shape)
print(x)
x=x.reshape(-1)
print(x.shape)
print(x)
y = [2,4,6,8]
#x*2=[2,4,6,8]
#x*x=[1,4,9,16]
#sum(x) = 10
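# fit a degree-3 polynomial; numpy.polyfit returns coefficients from highest to lowest degree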
pf=numpy.polyfit(x,y,3)
print(pf)
print(type(pf))
model = numpy.poly1d(pf)
drv=model.deriv()
print(model([1,2,3,4]))
print(type(drv))
print(model)
print(drv)
coeff=r2_score(y, model(x))
print(coeff)
|
[
"numpy.poly1d",
"numpy.array",
"numpy.polyfit"
] |
[((119, 133), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (130, 133), False, 'import numpy\n'), ((294, 316), 'numpy.polyfit', 'numpy.polyfit', (['x', 'y', '(3)'], {}), '(x, y, 3)\n', (307, 316), False, 'import numpy\n'), ((349, 365), 'numpy.poly1d', 'numpy.poly1d', (['pf'], {}), '(pf)\n', (361, 365), False, 'import numpy\n')]
|
#!/usr/bin/python
from ConfigParser import SafeConfigParser
import os
import string
class baseObj:
def __init__(self, multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken):
self.multiProcess = multiProcess
self.userAgent = userAgent
self.outputFolderName = outputFolderName
self.outputFolder = outputFolder
self.deleteOutput = deleteOutput
self.dateFormat = dateFormat
self.useTor = useTor
self.torIP = torIP
self.torPort = torPort
self.redirectLimit = redirectLimit
self.hashCountLimit = hashCountLimit
self.urlCharLimit = urlCharLimit
self.osintDays = osintDays
self.malShareApiKey = malShareApiKey
self.disableMalShare = disableMalShare
self.disableOsint = disableOsint
self.otxKey = otxKey
self.shodanKey = shodanKey
self.vtKey = vtKey
self.vtUser = vtUser
self.vtReqPerMin = vtReqPerMin
self.disableVT = disableVT
self.viperUrlAdd = viperUrlAdd
self.viperApiToken = 'Token {0}'.format(viperApiToken)
def getBaseConfig(rootDir):
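    # read config/settings.conf under the project root and map each setting onto a baseObj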
parser = SafeConfigParser()
parser.read(os.path.join(rootDir, 'config', 'settings.conf'))
multiProcess = parser.get('Core', 'multiprocess')
userAgent = parser.get('Core', 'useragent')
outputFolderName = parser.get('Core', 'outputfolder')
outputFolder = os.path.join(rootDir, outputFolderName)
deleteOutput = parser.get('Core', 'deleteoutput')
dateFormat = parser.get('Core', 'dateformat')
useTor = parser.get('Core', 'usetor')
torIP = parser.get('Core', 'torip')
torPort = parser.get('Core', 'torport')
redirectLimit = parser.get('Core', 'redirectlimit')
hashCountLimit = parser.get('Core', 'hashcountlimit')
urlCharLimit = parser.get('Core', 'urlcharlimit')
osintDays = parser.get('Core', 'osintdays')
malShareApiKey = parser.get('MalShare', 'apikey')
disableMalShare = parser.get('MalShare', 'disable')
disableOsint = parser.get('OSINT', 'disable')
otxKey = parser.get('OTX', 'apikey')
shodanKey = parser.get('Shodan', 'apikey')
vtKey = parser.get('VirusTotal', 'apikey')
vtUser = parser.get('VirusTotal', 'username')
vtReqPerMin = parser.get('VirusTotal', 'requestsperminute')
disableVT = parser.get('VirusTotal', 'disable')
viperUrlAdd = parser.get('Viper', 'addurl')
viperApiToken = parser.get('Viper', 'apitoken')
return baseObj(multiProcess, userAgent, outputFolderName, outputFolder, deleteOutput, dateFormat, useTor, torIP, torPort, redirectLimit, hashCountLimit, urlCharLimit, osintDays, malShareApiKey, disableMalShare, disableOsint, otxKey, shodanKey, vtKey, vtUser, vtReqPerMin, disableVT, viperUrlAdd, viperApiToken)
|
[
"ConfigParser.SafeConfigParser",
"os.path.join"
] |
[((1391, 1409), 'ConfigParser.SafeConfigParser', 'SafeConfigParser', ([], {}), '()\n', (1407, 1409), False, 'from ConfigParser import SafeConfigParser\n'), ((1656, 1695), 'os.path.join', 'os.path.join', (['rootDir', 'outputFolderName'], {}), '(rootDir, outputFolderName)\n', (1668, 1695), False, 'import os\n'), ((1426, 1474), 'os.path.join', 'os.path.join', (['rootDir', '"""config"""', '"""settings.conf"""'], {}), "(rootDir, 'config', 'settings.conf')\n", (1438, 1474), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from pathlib import Path
import pytest
import manimpango
from . import CASES_DIR
from ._manim import MarkupText
from .svg_tester import SVGStyleTester
ipsum_text = (
"<b>Lorem ipsum dolor</b> sit amet, <i>consectetur</i> adipiscing elit,"
"sed do eiusmod tempor incididunt ut labore et dolore"
"magna aliqua. Ut enim <b>ad</b> minim veniam, quis nostrud"
"exercitation ullamco laboris nisi ut aliquip"
"ex ea commodo consequat. Duis aute irure dolor"
"in reprehenderit in voluptate velit esse cillum"
"dolore eu fugiat nulla pariatur. Excepteur sint"
"occaecat cupidatat non proident, sunt in culpa qui"
"officia deserunt mollit anim id est laborum."
)
@pytest.mark.parametrize("text", ["foo", "<b>bar</b>", "வணக்கம்"])
def test_good_markup(text):
assert not manimpango.MarkupUtils.validate(
text,
), f"{text} should not fail validation"
@pytest.mark.parametrize("text", ["<b>foo", "<xyz>foo</xyz>"])
def test_bad_markup(text):
assert manimpango.MarkupUtils.validate(
text
), f"{text} should fail validation (unbalanced tags)"
@pytest.mark.parametrize(
"text,error",
[
(
"<b>foo",
"Error on line 1 char 23: Element “markup” was closed, "
"but the currently open element is “b”",
),
(
"<xyz>foo</xyz>",
"Unknown tag 'xyz' on line 1 char 14",
),
],
)
def test_bad_markup_error_message(text, error):
assert manimpango.MarkupUtils.validate(text) == error
def test_markup_text(tmpdir):
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(
'<span underline="error"><b><i>Hello Manim</i></b></span>', filename=str(loc)
)
assert loc.exists()
def test_markup_justify(tmpdir):
    # don't know how to verify this correctly;
    # it varies between systems, so we just
    # check whether it runs
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(ipsum_text, justify=True, filename=str(loc))
assert loc.exists()
def test_markup_indent(tmpdir):
    # don't know how to verify this correctly;
    # it varies between systems, so we just
    # check whether it runs
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(ipsum_text, indent=10, filename=str(loc))
assert loc.exists()
def test_markup_alignment(tmpdir):
    # don't know how to verify this correctly;
    # it varies between systems, so we just
    # check whether it runs
loc = Path(tmpdir, "test.svg")
assert not loc.exists()
MarkupText(
ipsum_text,
alignment=manimpango.Alignment.CENTER,
filename=str(loc),
)
assert loc.exists()
def test_markup_style(tmpdir):
test_case = CASES_DIR / "hello_blue_world_green.svg"
expected = tmpdir / "expected.svg"
text = "<span foreground='BLUE'>Hello</span>\n<span foreground='GREEN'>World</span>"
MarkupText(
text,
filename=str(expected),
)
s = SVGStyleTester(gotSVG=expected, expectedSVG=test_case)
assert len(s.got_svg_style) == len(s.expected_svg_style)
assert s.got_svg_style == s.expected_svg_style
def test_wrap_text(tmpdir):
tmpdir = Path(tmpdir)
wrapped = tmpdir / "wrap.svg"
nowrap = tmpdir / "nowarap.svg"
MarkupText(ipsum_text, wrap_text=False, filename=str(nowrap))
MarkupText(ipsum_text, filename=str(wrapped))
assert wrapped.read_text() != nowrap.read_text()
|
[
"pytest.mark.parametrize",
"pathlib.Path",
"manimpango.MarkupUtils.validate"
] |
[((720, 785), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['foo', '<b>bar</b>', 'வணக்கம்']"], {}), "('text', ['foo', '<b>bar</b>', 'வணக்கம்'])\n", (743, 785), False, 'import pytest\n'), ((924, 985), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['<b>foo', '<xyz>foo</xyz>']"], {}), "('text', ['<b>foo', '<xyz>foo</xyz>'])\n", (947, 985), False, 'import pytest\n'), ((1131, 1345), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text,error"""', '[(\'<b>foo\',\n \'Error on line 1 char 23: Element “markup” was closed, but the currently open element is “b”\'\n ), (\'<xyz>foo</xyz>\', "Unknown tag \'xyz\' on line 1 char 14")]'], {}), '(\'text,error\', [(\'<b>foo\',\n \'Error on line 1 char 23: Element “markup” was closed, but the currently open element is “b”\'\n ), (\'<xyz>foo</xyz>\', "Unknown tag \'xyz\' on line 1 char 14")])\n', (1154, 1345), False, 'import pytest\n'), ((1024, 1061), 'manimpango.MarkupUtils.validate', 'manimpango.MarkupUtils.validate', (['text'], {}), '(text)\n', (1055, 1061), False, 'import manimpango\n'), ((1604, 1628), 'pathlib.Path', 'Path', (['tmpdir', '"""test.svg"""'], {}), "(tmpdir, 'test.svg')\n", (1608, 1628), False, 'from pathlib import Path\n'), ((1960, 1984), 'pathlib.Path', 'Path', (['tmpdir', '"""test.svg"""'], {}), "(tmpdir, 'test.svg')\n", (1964, 1984), False, 'from pathlib import Path\n'), ((2267, 2291), 'pathlib.Path', 'Path', (['tmpdir', '"""test.svg"""'], {}), "(tmpdir, 'test.svg')\n", (2271, 2291), False, 'from pathlib import Path\n'), ((2574, 2598), 'pathlib.Path', 'Path', (['tmpdir', '"""test.svg"""'], {}), "(tmpdir, 'test.svg')\n", (2578, 2598), False, 'from pathlib import Path\n'), ((3271, 3283), 'pathlib.Path', 'Path', (['tmpdir'], {}), '(tmpdir)\n', (3275, 3283), False, 'from pathlib import Path\n'), ((830, 867), 'manimpango.MarkupUtils.validate', 'manimpango.MarkupUtils.validate', (['text'], {}), '(text)\n', (861, 867), False, 'import manimpango\n'), ((1515, 1552), 'manimpango.MarkupUtils.validate', 'manimpango.MarkupUtils.validate', (['text'], {}), '(text)\n', (1546, 1552), False, 'import manimpango\n')]
|
"""
Rental House Manager API
========================
"""
# https://www.pythoncentral.io/introduction-to-sqlite-in-python/
import sqlite3
import config
def make_connection():
return sqlite3.connect(config.DATABASE_URL)
class InquilinoException(Exception):
...
class CasaException(Exception):
...
class DAO():
def __init__(self, conn):
self.conn = conn
class Casa_DAO(DAO):
def adiciona_casa(self, nome=None, valor_aluguel=None, agua=None,
instalacao_eletrica=None, commit=False, rollback=False):
if nome is None:
raise Exception("Necessário prover nome.")
if valor_aluguel is None:
raise Exception("Necessário prover um valor para o aluguel.")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
casa(nome_casa, valor_aluguel_casa, agua_casa, num_instalacao)
VALUES
(?,?,?,?)
""", (nome, valor_aluguel, agua, instalacao_eletrica))
if commit:
self.conn.commit()
return {
'id_casa': cursor.lastrowid,
'nome_casa': nome,
'valor_aluguel': valor_aluguel,
'agua_casa': agua,
'num_instalacao_eletrica': instalacao_eletrica
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def todas_casas(self, vazias=False):
cursor = self.conn.cursor()
if vazias:
cursor.execute("""
SELECT c.id_casa, nome_casa, valor_aluguel_casa,
agua_casa, i.num_instalacao, cpf_titular
FROM casa c
LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao
WHERE c.id_casa NOT IN (
SELECT casa.id_casa from casa
JOIN contrato ON contrato.id_casa= casa.id_casa
WHERE ativo )
GROUP BY c.id_casa;
""")
else:
cursor.execute("""
SELECT c.id_casa, nome_casa, valor_aluguel_casa,
agua_casa, i.num_instalacao, cpf_titular
FROM casa c
LEFT JOIN instalacao_eletrica i ON c.num_instalacao = i.num_instalacao;
""")
casas = cursor.fetchall()
return [{
'id_casa': x[0],
'nome_casa': x[1],
'valor_aluguel': x[2],
'agua_casa': x[3],
'num_instalacao_eletrica': x[4],
'cpf': x[5]
} for x in casas]
def altera_casa(self, id=None, commit=False, rollback=False,
**kwargs):
if id is None:
raise Exception("Necessário prover um ID")
if not len(kwargs):
raise Exception("Necessário prover novas informações para o Inquilino")
query = f'''UPDATE casa
SET {', '.join([f"{key}{'_casa' if key != 'num_instalacao' else '' } = ?" for key in kwargs.keys()])}
WHERE id_casa = ?'''
# return None
try:
cursor = self.conn.cursor()
cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id]))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
class Instalacao_Eletrica_DAO(DAO):
def adiciona_instalacao_eletrica(self, num_instalacao=None, cpf=None, commit=False, rollback=False):
if num_instalacao is None:
raise Exception("Necessário prover um número de instalação")
if cpf is None:
raise Exception("Necessário prover um número de CPF")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
instalacao_eletrica
VALUES
(?, ?)
""", (num_instalacao, cpf))
if commit:
self.conn.commit()
return {
'num_instalacao': num_instalacao,
'cpf_titular': cpf
}
except sqlite3.Error as e:
# e
if rollback:
self.conn.rollback()
return None
def altera_instalacao(self, num_instalacao, cpf, commit=False, rollback=False):
query = f'''UPDATE instalacao_eletrica
SET cpf_titular = ?
WHERE num_instalacao = ? '''
# return None
try:
cursor = self.conn.cursor()
cursor.execute(query, (cpf, num_instalacao))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def todas_instalacoes(self):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM instalacao_eletrica;
""")
        instalacoes = cursor.fetchall()
return [{
'num_instalacao': x[0],
'cpf_titular': x[1]
} for x in instalacoes]
class Inquilino_DAO(DAO):
def adiciona_inquilino(self, cpf=None, nome=None,
rg=None, commit=False, rollback=False):
if cpf is None:
raise Exception("Necessário prover um número de CPF")
if nome is None:
raise Exception("Necessário prover um Nome")
if rg is None:
raise Exception("Necessário prover um RG")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
inquilino(cpf_inq, nome_inq, rg_inq)
VALUES
(?, ?, ?)
""", (cpf, nome, rg))
if commit:
self.conn.commit()
return {
'id_inq': cursor.lastrowid,
'cpf_inq': cpf,
'nome_inq': nome,
'rg_inq': rg
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def todos_inquilinos(self, ativos=False, inativos=False):
cursor = self.conn.cursor()
if ativos and inativos:
raise Exception("Conflito")
elif ativos:
cursor.execute("""
select * from inquilino
where id_inq in (select DISTINCT id_inq from contrato where ativo);
""")
elif inativos:
cursor.execute("""
select * from inquilino
where id_inq not in (select DISTINCT id_inq from contrato where ativo);
""")
else:
cursor.execute("""
SELECT * from inquilino;
""")
inquilinos = cursor.fetchall()
return [{
'id_inq': x[0],
'cpf_inq': x[1],
'nome_inq': x[2],
'rg_inq': x[3]
} for x in inquilinos]
def altera_inquilino(self, id=None, commit=False, rollback=False,
**kwargs):
if id is None:
raise Exception("Necessário prover um ID")
if not len(kwargs):
raise Exception("Necessário prover novas informações para o Inquilino")
query = f'''UPDATE inquilino
SET {', '.join([f'{key}_inq = ?' for key in kwargs.keys()])}
WHERE id_inq = ?'''
try:
cursor = self.conn.cursor()
cursor.execute(query, tuple((kwargs[k] for k in kwargs.keys())) + tuple([id]))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
class Contrato_DAO(DAO):
def adiciona_contrato(self, valor=None, ativo=True, dia_vencimento=None,
fim_contrato=None, casa=None, inq=None,
commit=False, rollback=False):
if valor is None:
raise Exception("Necessário prover um valor de aluguel para o contrato")
if dia_vencimento is None:
raise Exception("Necessário prover uma data de vencimento")
if casa is None:
raise Exception("Necessário escolher uma casa")
if inq is None:
raise Exception("Necessário escolher um inquilino")
try:
cursor = self.conn.cursor()
self._valida(inq, casa)
cursor.execute("""
INSERT INTO
contrato(valor, ativo, dt_fim_contrato, dia_venc_aluguel, id_casa, id_inq)
VALUES
(?,?,?,?,?,?)
""", (valor, ativo,fim_contrato, dia_vencimento, casa, inq))
if commit:
self.conn.commit()
return {
'id_contrato': cursor.lastrowid,
'valor': valor,
'ativo': ativo,
'dt_fim_contrato': fim_contrato,
'dia_venc_aluguel': dia_vencimento,
'id_casa': casa,
'id_inq': inq
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def _valida(self, id_inq=None, id_casa=None):
c = Contrato_DAO(make_connection())
if id_inq and id_inq in [x['id_inq'] for x in c.todos_contratos() if x['ativo']]:
raise InquilinoException()
if id_casa and id_casa in [x['id_casa'] for x in c.todos_contratos() if x['ativo']]:
raise CasaException()
def todos_contratos(self):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM contrato;
""")
contratos = cursor.fetchall()
return [{
'id_contrato': x[0],
'valor': x[1],
'ativo': x[2],
'dt_fim_contrato': x[3],
'dia_venc_aluguel': x[4],
'id_casa': x[5],
'id_inq': x[6]
} for x in contratos]
def altera_valor_contrato(self, id=None, valor=None, commit=False, rollback=False):
if id is None:
raise Exception("Necessário prover um ID")
if valor is None:
raise Exception("Necessário prover um valor")
query = f'''UPDATE contrato
SET valor = ?
WHERE id_contrato = ?'''
print(query)
try:
cursor = self.conn.cursor()
cursor.execute(query, (valor, id))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def inativa_contrato(self, id=None, commit=False, rollback=False):
if id is None:
raise Exception("Necessário prover um ID")
query = '''UPDATE contrato
SET ativo = 0
WHERE id_contrato = ?'''
try:
cursor = self.conn.cursor()
cursor.execute(query, (id, ))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def ativa_contrato(self, id=None, commit=False, rollback=False):
if id is None:
raise Exception("Necessário prover um ID")
C = self.get_contrato(id)
self._valida(C['id_inq'], C['id_casa'] )
query = '''UPDATE contrato
SET ativo = 1
WHERE id_contrato = ?'''
try:
cursor = self.conn.cursor()
cursor.execute(query, (id,))
if commit:
self.conn.commit()
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
def get_contrato(self, id):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM contrato
WHERE id_contrato = ?;
""", tuple([id]))
contratos = cursor.fetchall()
return [{
'id_contrato': x[0],
'valor': x[1],
'ativo': x[2],
'dt_fim_contrato': x[3],
'dia_venc_aluguel': x[4],
'id_casa': x[5],
'id_inq': x[6]
} for x in contratos][0]
class PagamentoDAO(DAO):
def realiza_pagamento(self, id_contrato=None, dt_pag=None, dt_venc=None, deposito=False, commit=False, rollback=False):
if id_contrato is None:
raise Exception("Necessário prover um contrato")
if dt_venc is None:
raise Exception("Necessário prover uma data de vencimento")
if dt_pag is None:
raise Exception("Necessário prover uma data de pagamento")
try:
cursor = self.conn.cursor()
cursor.execute("""
INSERT INTO
pagamento(dt_venc, dt_pag, deposito, id_contrato)
VALUES
(?, ?, ?, ?)
""", (dt_venc, dt_pag, deposito, id_contrato))
if commit:
self.conn.commit()
return {
'id_pag': cursor.lastrowid ,
'dt_venc': dt_venc ,
'dt_pag': dt_pag ,
'deposito': deposito ,
'id_contrato': id_contrato
}
except sqlite3.Error as e:
if rollback:
self.conn.rollback()
return None
def todos_pagamentos(self):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM pagamento;
""")
pagamentos = cursor.fetchall()
return [{
'id_pag': x[0] ,
'dt_venc': x[1] ,
'dt_pag': x[2] ,
'deposito': x[3] ,
'id_contrato': x[4]
} for x in pagamentos]
def todos_pagamentos_contrato(self, id_contrato):
cursor = self.conn.cursor()
cursor.execute("""
SELECT * FROM pagamento
WHERE pagamento.id_contrato = ?;
""", (id_contrato))
pagamentos = cursor.fetchall()
return [{
'id_pag': x[0] ,
'dt_venc': x[1] ,
'dt_pag': x[2] ,
'deposito': x[3] ,
'id_contrato': x[4]
} for x in pagamentos]
def start_db(conn):
cursor = conn.cursor()
cursor.executescript("""
CREATE TABLE IF NOT EXISTS instalacao_eletrica (
num_instalacao VARCHAR(20) NOT NULL PRIMARY KEY,
cpf_titular VARCHAR(11) NOT NULL UNIQUE
);
CREATE TABLE IF NOT EXISTS casa(
id_casa INTEGER NOT NULL PRIMARY KEY,
nome_casa INTEGER NOT NULL,
valor_aluguel_casa INTEGER NOT NULL,
agua_casa VARCHAR(10),
num_instalacao VARCHAR(11) UNIQUE,
FOREIGN KEY (num_instalacao) REFERENCES instalacao_eletrica(num_instalacao)
);
CREATE TABLE IF NOT EXISTS inquilino(
id_inq INTEGER NOT NULL PRIMARY KEY,
cpf_inq VARCHAR(11) NOT NULL UNIQUE,
nome_inq VARCHAR(40) NOT NULL,
rg_inq VARCHAR(10) NOT NULL
);
CREATE TABLE IF NOT EXISTS contrato(
id_contrato INTEGER NOT NULL PRIMARY KEY,
valor REAL NOT NULL,
ativo INTEGER NOT NULL,
dt_fim_contrato DATE NOT NULL,
dia_venc_aluguel INTEGER NOT NULL,
id_casa INTEGER NOT NULL,
id_inq INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS pagamento(
id_pag INTEGER NOT NULL PRIMARY KEY,
dt_venc VARCHAR(23) NOT NULL,
dt_pag VARCHAR(23),
deposito INTEGER NOT NULL,
id_contrato INTEGER ,
FOREIGN KEY (id_contrato) REFERENCES contrato(id_contrato)
);
""")
|
[
"sqlite3.connect"
] |
[((220, 256), 'sqlite3.connect', 'sqlite3.connect', (['config.DATABASE_URL'], {}), '(config.DATABASE_URL)\n', (235, 256), False, 'import sqlite3\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_tool.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_tool.proto',
package='basic',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0flist_tool.proto\x12\x05\x62\x61sic\x1a\x1etool_sdk/model/tool/tool.proto\"\x95\x01\n\x0fListToolRequest\x12\x0e\n\x06\x64\x65tail\x18\x01 \x01(\x08\x12\x0e\n\x06plugin\x18\x02 \x01(\x08\x12\x10\n\x08\x63\x61tegory\x18\x03 \x01(\t\x12\x13\n\x0bpermissions\x18\x04 \x01(\t\x12\x16\n\x0eonlyProduction\x18\x05 \x01(\x08\x12\x15\n\rshowInvisible\x18\x06 \x01(\x08\x12\x0c\n\x04tags\x18\x07 \x01(\t\"\\\n\x10ListToolResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x18\n\x04list\x18\x04 \x03(\x0b\x32\n.tool.Tool\"r\n\x17ListToolResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12%\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x17.basic.ListToolResponseb\x06proto3')
,
dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,])
_LISTTOOLREQUEST = _descriptor.Descriptor(
name='ListToolRequest',
full_name='basic.ListToolRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='detail', full_name='basic.ListToolRequest.detail', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='plugin', full_name='basic.ListToolRequest.plugin', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='basic.ListToolRequest.category', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='permissions', full_name='basic.ListToolRequest.permissions', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='onlyProduction', full_name='basic.ListToolRequest.onlyProduction', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='showInvisible', full_name='basic.ListToolRequest.showInvisible', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='basic.ListToolRequest.tags', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=208,
)
_LISTTOOLRESPONSE = _descriptor.Descriptor(
name='ListToolResponse',
full_name='basic.ListToolResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='basic.ListToolResponse.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='basic.ListToolResponse.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='basic.ListToolResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='basic.ListToolResponse.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=210,
serialized_end=302,
)
_LISTTOOLRESPONSEWRAPPER = _descriptor.Descriptor(
name='ListToolResponseWrapper',
full_name='basic.ListToolResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='basic.ListToolResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='basic.ListToolResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='basic.ListToolResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='basic.ListToolResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=304,
serialized_end=418,
)
_LISTTOOLRESPONSE.fields_by_name['list'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL
_LISTTOOLRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTTOOLRESPONSE
DESCRIPTOR.message_types_by_name['ListToolRequest'] = _LISTTOOLREQUEST
DESCRIPTOR.message_types_by_name['ListToolResponse'] = _LISTTOOLRESPONSE
DESCRIPTOR.message_types_by_name['ListToolResponseWrapper'] = _LISTTOOLRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListToolRequest = _reflection.GeneratedProtocolMessageType('ListToolRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTTOOLREQUEST,
'__module__' : 'list_tool_pb2'
# @@protoc_insertion_point(class_scope:basic.ListToolRequest)
})
_sym_db.RegisterMessage(ListToolRequest)
ListToolResponse = _reflection.GeneratedProtocolMessageType('ListToolResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTTOOLRESPONSE,
'__module__' : 'list_tool_pb2'
# @@protoc_insertion_point(class_scope:basic.ListToolResponse)
})
_sym_db.RegisterMessage(ListToolResponse)
ListToolResponseWrapper = _reflection.GeneratedProtocolMessageType('ListToolResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTTOOLRESPONSEWRAPPER,
'__module__' : 'list_tool_pb2'
# @@protoc_insertion_point(class_scope:basic.ListToolResponseWrapper)
})
_sym_db.RegisterMessage(ListToolResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] |
[((463, 489), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (487, 489), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((8908, 9058), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""ListToolRequest"""', '(_message.Message,)', "{'DESCRIPTOR': _LISTTOOLREQUEST, '__module__': 'list_tool_pb2'}"], {}), "('ListToolRequest', (_message.\n Message,), {'DESCRIPTOR': _LISTTOOLREQUEST, '__module__': 'list_tool_pb2'})\n", (8948, 9058), True, 'from google.protobuf import reflection as _reflection\n'), ((9189, 9346), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""ListToolResponse"""', '(_message.Message,)', "{'DESCRIPTOR': _LISTTOOLRESPONSE, '__module__': 'list_tool_pb2'}"], {}), "('ListToolResponse', (_message.\n Message,), {'DESCRIPTOR': _LISTTOOLRESPONSE, '__module__': 'list_tool_pb2'}\n )\n", (9229, 9346), True, 'from google.protobuf import reflection as _reflection\n'), ((9481, 9651), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""ListToolResponseWrapper"""', '(_message.Message,)', "{'DESCRIPTOR': _LISTTOOLRESPONSEWRAPPER, '__module__': 'list_tool_pb2'}"], {}), "('ListToolResponseWrapper', (\n _message.Message,), {'DESCRIPTOR': _LISTTOOLRESPONSEWRAPPER,\n '__module__': 'list_tool_pb2'})\n", (9521, 9651), True, 'from google.protobuf import reflection as _reflection\n'), ((1817, 2154), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""detail"""', 'full_name': '"""basic.ListToolRequest.detail"""', 'index': '(0)', 'number': '(1)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='detail', full_name=\n 'basic.ListToolRequest.detail', index=0, number=1, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (1844, 2154), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2179, 2516), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""plugin"""', 'full_name': '"""basic.ListToolRequest.plugin"""', 'index': '(1)', 'number': '(2)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='plugin', full_name=\n 'basic.ListToolRequest.plugin', index=1, number=2, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2206, 2516), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3313, 3666), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""onlyProduction"""', 'full_name': '"""basic.ListToolRequest.onlyProduction"""', 'index': '(4)', 'number': '(5)', 'type': '(8)', 'cpp_type': 
'(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='onlyProduction', full_name=\n 'basic.ListToolRequest.onlyProduction', index=4, number=5, type=8,\n cpp_type=7, label=1, has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3340, 3666), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3691, 4042), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""showInvisible"""', 'full_name': '"""basic.ListToolRequest.showInvisible"""', 'index': '(5)', 'number': '(6)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='showInvisible', full_name=\n 'basic.ListToolRequest.showInvisible', index=5, number=6, type=8,\n cpp_type=7, label=1, has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3718, 4042), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4843, 5172), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""page"""', 'full_name': '"""basic.ListToolResponse.page"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='page', full_name=\n 'basic.ListToolResponse.page', index=0, number=1, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (4870, 5172), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5198, 5539), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""page_size"""', 'full_name': '"""basic.ListToolResponse.page_size"""', 'index': '(1)', 'number': '(2)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='page_size', full_name=\n 'basic.ListToolResponse.page_size', index=1, number=2, type=5, cpp_type\n =1, label=1, has_default_value=False, default_value=0, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (5225, 5539), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5563, 5894), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""total"""', 'full_name': '"""basic.ListToolResponse.total"""', 'index': '(2)', 'number': '(3)', 'type': 
'(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='total', full_name=\n 'basic.ListToolResponse.total', index=2, number=3, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (5590, 5894), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5920, 6252), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""list"""', 'full_name': '"""basic.ListToolResponse.list"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='list', full_name=\n 'basic.ListToolResponse.list', index=3, number=4, type=11, cpp_type=10,\n label=3, has_default_value=False, default_value=[], message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (5947, 6252), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6701, 7038), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""code"""', 'full_name': '"""basic.ListToolResponseWrapper.code"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='code', full_name=\n 'basic.ListToolResponseWrapper.code', index=0, number=1, type=5,\n cpp_type=1, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (6728, 7038), True, 'from google.protobuf import descriptor as _descriptor\n'), ((7845, 8187), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""data"""', 'full_name': '"""basic.ListToolResponseWrapper.data"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='data', full_name=\n 'basic.ListToolResponseWrapper.data', index=3, number=4, type=11,\n cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (7872, 8187), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import argparse
import os
import re
import sys
from inc.HRDF.Stops_Reporter.stops_reporter import HRDF_Stops_Reporter
from inc.HRDF.HRDF_Parser.hrdf_helpers import compute_formatted_date_from_hrdf_db_path
from inc.HRDF.db_helpers import compute_db_tables_report
parser = argparse.ArgumentParser(description = 'Generate stops report from HRDF DB')
parser.add_argument('-p', '--path', help='Path to HRDF DB')
args = parser.parse_args()
db_path = args.path
if db_path is None:
print("ERROR, use with --path")
sys.exit(1)
compute_db_tables_report(db_path=db_path)
|
[
"inc.HRDF.db_helpers.compute_db_tables_report",
"argparse.ArgumentParser",
"sys.exit"
] |
[((273, 346), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate stops report from HRDF DB"""'}), "(description='Generate stops report from HRDF DB')\n", (296, 346), False, 'import argparse\n'), ((531, 572), 'inc.HRDF.db_helpers.compute_db_tables_report', 'compute_db_tables_report', ([], {'db_path': 'db_path'}), '(db_path=db_path)\n', (555, 572), False, 'from inc.HRDF.db_helpers import compute_db_tables_report\n'), ((518, 529), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (526, 529), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.buttonLayout = QtWidgets.QGridLayout()
self.gridLayout.addLayout(self.buttonLayout, 0, 0)
self.openfileButton = QtWidgets.QPushButton(self.centralwidget)
self.openfileButton.setObjectName("openfileButton")
self.buttonLayout.addWidget(self.openfileButton, 0, 0, 1, 1)
self.saveButton = QtWidgets.QPushButton(self.centralwidget)
self.saveButton.setObjectName('saveButton')
self.buttonLayout.addWidget(self.saveButton, 1, 0, 1, 1)
self.nextButton = QtWidgets.QPushButton(self.centralwidget)
self.nextButton.setObjectName('nextButton')
self.buttonLayout.addWidget(self.nextButton, 2, 0, 1, 1)
self.clearButton = QtWidgets.QPushButton(self.centralwidget)
self.clearButton.setObjectName('clearButton')
self.buttonLayout.addWidget(self.clearButton, 4, 0, 1, 1)
self.lastButton = QtWidgets.QPushButton(self.centralwidget)
self.lastButton.setObjectName('lastButton')
self.buttonLayout.addWidget(self.lastButton, 3, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label_pos = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_pos, 0, 1)
self.label_state = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_state, 1, 1)
self.label_file_name = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_file_name, 2, 1)
self.label_annotation_points = QtWidgets.QLabel()
self.buttonLayout.addWidget(self.label_annotation_points, 4, 1)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.openfileButton.setText(_translate("MainWindow", "open"))
self.saveButton.setText('save')
self.nextButton.setText('next')
self.clearButton.setText('clear')
self.lastButton.setText('previous')
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QMenuBar"
] |
[((437, 466), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (454, 466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((555, 596), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.centralwidget'], {}), '(self.centralwidget)\n', (576, 596), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((681, 704), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (702, 704), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((789, 812), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (810, 812), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((906, 947), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (927, 947), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1108, 1149), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1129, 1149), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1298, 1339), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1319, 1339), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1489, 1530), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1510, 1530), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1682, 1723), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1703, 1723), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1994, 2024), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (2012, 2024), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2206, 2238), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (2226, 2238), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2367, 2385), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2383, 2385), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2475, 2493), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2491, 2493), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2589, 2607), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2605, 2607), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2715, 2733), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2731, 2733), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2858, 2907), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (2895, 2907), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2059, 2086), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(800)', '(18)'], {}), '(0, 0, 800, 18)\n', (2071, 2086), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Post, Category, Tag
from typeidea.custom_site import custom_site
from django.utils.html import format_html
from django.core.urlresolvers import reverse
from .adminforms import PostAdminForm
from typeidea.custom_admin import BaseOwnerAdmin
# Register your models here.
@admin.register(Post, site=custom_site)
class PostAdmin(BaseOwnerAdmin):
form = PostAdminForm
list_display = ['title', 'category', 'status', 'owner', 'created_time', 'operator']
list_display_links = ['category', 'status']
search_fields = ['title', 'category__name', 'owner__first_name']
save_on_top = False
    show_full_result_count = False  # optimize result display
list_filter = ['title']
actions_on_top = True
date_hierarchy = 'created_time'
list_editable = ['title', ]
    # edit page configuration
    fieldsets = (  # mutually exclusive with fields
('基础配置', {
'fields': (('category', 'title'),
'desc',
'status',
'content')
}),
('高级配置', {
'classes': ('collapse', 'addon'),
'fields': ('tag',),
}),
    )  # for layout purposes
filter_horizontal = ('tag',)
def operator(self, obj):
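        # extra list_display column: renders an edit link to the post change view on the custom admin site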
return format_html(
'<a href="{}">编辑</a>',
reverse('cus_site:blog_post_change', args=(obj.id,))
)
    operator.short_description = '操作'
operator.empty_value_display = '???'
class PostInlineAdmin(admin.TabularInline):
fields = ('title', 'status')
extra = 1
model = Post
@admin.register(Category, site=custom_site)
class CategoryAdmin(BaseOwnerAdmin):
list_display = ['name', 'status','is_nav', 'created_time']
inlines = [PostInlineAdmin,]
fields = ('name', 'status', 'is_nav',)
@admin.register(Tag, site=custom_site)
class TagAdmin(BaseOwnerAdmin):
list_display = ['name', 'status', 'owner', 'created_time']
|
[
"django.contrib.admin.register",
"django.core.urlresolvers.reverse"
] |
[((389, 427), 'django.contrib.admin.register', 'admin.register', (['Post'], {'site': 'custom_site'}), '(Post, site=custom_site)\n', (403, 427), False, 'from django.contrib import admin\n'), ((1616, 1658), 'django.contrib.admin.register', 'admin.register', (['Category'], {'site': 'custom_site'}), '(Category, site=custom_site)\n', (1630, 1658), False, 'from django.contrib import admin\n'), ((1838, 1875), 'django.contrib.admin.register', 'admin.register', (['Tag'], {'site': 'custom_site'}), '(Tag, site=custom_site)\n', (1852, 1875), False, 'from django.contrib import admin\n'), ((1362, 1414), 'django.core.urlresolvers.reverse', 'reverse', (['"""cus_site:blog_post_change"""'], {'args': '(obj.id,)'}), "('cus_site:blog_post_change', args=(obj.id,))\n", (1369, 1414), False, 'from django.core.urlresolvers import reverse\n')]
|
from adabelief_pytorch import AdaBelief
import torch_optimizer
from torch import optim
from src.sam import SAM
__OPTIMIZERS__ = {
"AdaBelief": AdaBelief,
"RAdam": torch_optimizer.RAdam,
"SAM": SAM
}
def get_optimizer(cfg, model):
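    # SAM wraps a base optimizer, resolved from the registry above or looked up on torch.optim by name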
optimizer_name = cfg.optimizer.name
if optimizer_name == "SAM":
base_optimizer_name = cfg.optimizer.base
if __OPTIMIZERS__.get(base_optimizer_name) is not None:
base_optimizer = __OPTIMIZERS__[base_optimizer_name]
else:
base_optimizer = optim.__getattribute__(base_optimizer_name)
return SAM(model.parameters(), base_optimizer, **cfg.optimizer.param)
if __OPTIMIZERS__.get(optimizer_name) is not None:
return __OPTIMIZERS__[optimizer_name](model.parameters(), **cfg.optimizer.param)
else:
return optim.__getattribute__(optimizer_name)(model.parameters(), **cfg.optimizer.param)
|
[
"torch.optim.__getattribute__"
] |
[((539, 582), 'torch.optim.__getattribute__', 'optim.__getattribute__', (['base_optimizer_name'], {}), '(base_optimizer_name)\n', (561, 582), False, 'from torch import optim\n'), ((835, 873), 'torch.optim.__getattribute__', 'optim.__getattribute__', (['optimizer_name'], {}), '(optimizer_name)\n', (857, 873), False, 'from torch import optim\n')]
|
import numpy as np
import tensorflow as tf
import random
import _pickle as pkl
import matplotlib.pyplot as plt
from pylab import rcParams
import scipy
import scipy.stats as stats
from tensorflow.python.ops import gen_nn_ops
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
MEAN_IMAGE = np.zeros((1, 227, 227, 3)).astype(np.float32)
MEAN_IMAGE[:, :, :, 0] = 103.939
MEAN_IMAGE[:, :, :, 1] = 116.779
MEAN_IMAGE[:, :, :, 2] = 123.68
EPSILON = 1e-12
MIN_INPUT = -MEAN_IMAGE
MAX_INPUT = 255 * np.ones_like(MEAN_IMAGE).astype(np.float32) - MEAN_IMAGE
def dataReader():
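    # load the pickled ImageNet validation batches (20 samples each) into pre-allocated arrays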
X = np.zeros((100, 227, 227, 3))
y = np.zeros(100)
for num in range(4):
with open(
"./ImagenetValidationSamples/imagenet_sample_{}.pkl".format(
num), "rb") as inputs:
dic_temp = pkl.load(inputs)
X[num * 20:num * 20 + 20] = dic_temp["X"]
y[num * 20:num * 20 + 20] = dic_temp["y"]
labels = dic_temp["labels"]
return X, y.astype(int), labels
class SimpleGradientAttack(object):
def __init__(self,
mean_image,
sess,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
target_map=None,
pixel_max=255.):
"""
Args:
mean_image: The mean image of the data set(The assumption is that the images are mean subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
NET.saliency_flatten is its flatten version.
NET2: Surrogate neural network with the same structure and weights of the orignal network but
with activations replaced by softplus function
(necessary only when the activation function of the original function
does not have second order gradients, ex: ReLU). It's assumed that NET.saliency is the
saliency map tensor and NET2.saliency_flatten is its flatten version.
k_top: the topK parameter of the attack (refer to the original paper)
pixel_max: the maximum pixel value in the image.
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if NET.input.get_shape()[-3]!=test_image.shape[-3] or NET.input.get_shape()[-2]!=test_image.shape[-2] or\
NET.input.get_shape()[-1]!=test_image.shape[-1]:
raise ValueError(
"Model's input dimensions is not Compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if NET2.input.get_shape()[-3]!=test_image.shape[-3] or NET2.input.get_shape()[-2]!=test_image.shape[-2] or\
NET2.input.get_shape()[-1]!=test_image.shape[-1]:
raise ValueError(
"Surrogate model's input dimensions is not Compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.create_attack_ops(NET2, test_image.shape[-3],
test_image.shape[-2])
self.update_new_image(test_image, original_label)
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], self.test_image,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
self.test_image,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
self.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph:
self.original_label,
self.topk_ph:
self.original_topk,
self.mass_center_ph:
self.original_mass_center
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input:
feed,
NET.label_ph:
self.original_label,
self.topk_ph:
self.original_topk,
self.mass_center_ph:
self.original_mass_center
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.topK_direction,
in_image, self.NET2)
perturbation = np.reshape(perturbation, [w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.mass_center_direction, in_image,
self.NET2)
perturbation = np.reshape(perturbation, [w, h, c])
elif attack_method == "target":
self.use_target = True
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
in_image, self.NET2)
debug = self.run_model(self.sess, self.debug, in_image,
self.NET2)
print("MSE: ", debug)
perturbation = np.reshape(perturbation, [w, h, c])
return np.sign(perturbation)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * np.sign(pert) - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
test_image_pert, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
test_image_pert, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1.
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection",
target=None):
"""
Args:
attack_method: One of "mass_center", "topK" or "random"
        epsilon: Allowed maximum $\ell_\infty$ norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
beta_0: parameter for manipulate (target) attack
beta_1: parameter for manipulate (target) attack
Returns:
        intersection: The number of top K salient pixels of the original picture that are also among the
        top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
self.beta_0 = beta_0
self.beta_1 = beta_1
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
min_criterion = 1.
perturb_size = 0.
last_image = None
for counter in range(iters):
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
pass
if criterion == 1.:
return None
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], self.perturbed_image, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
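# --- Usage sketch (illustrative, not part of the original module) -----------
# A minimal, hedged example of how SimpleGradientAttack is typically driven.
# The TensorFlow session `sess`, the original network `NET` and its softplus
# surrogate `NET2` are assumed to be built elsewhere, so those names are
# placeholders rather than APIs defined in this file; the pickled samples are
# assumed to hold raw (not yet mean-subtracted) pixels.
def _example_simple_gradient_attack(sess, NET, NET2):
    X, y, labels = dataReader()
    test_image = X[0] - MEAN_IMAGE[0]  # mean-subtracted input
    attack = SimpleGradientAttack(mean_image=MEAN_IMAGE[0],
                                 sess=sess,
                                 test_image=test_image,
                                 original_label=y[0],
                                 NET=NET,
                                 NET2=NET2,
                                 k_top=1000)
    # Run the top-K attack with an L_inf budget of 8 pixel values; the result
    # is None on failure, otherwise (intersection, correlation,
    # center_dislocation, confidence, perturb_size, cos_distance).
    return attack.iterative_attack("topK",
                                   epsilon=8.,
                                   iters=100,
                                   alpha=1,
                                   measure="intersection")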
class IntegratedGradientsAttack(object):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
reference_image=None,
target_map=None,
pixel_max=255.):
"""
Args:
        mean_image: The mean image of the data set (the assumption is that the images are mean subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
        NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
        NET.saliency_flatten is its flattened version.
        NET2: Surrogate neural network with the same structure and weights as the original network, but
        with activations replaced by the softplus function
        (necessary only when the activation function of the original network
        does not have second-order gradients, e.g. ReLU). It's assumed that NET2.saliency is the
        saliency map tensor and NET2.saliency_flatten is its flattened version.
k_top: the topK parameter of the attack (refer to the original paper)
num_steps: Number of steps in Integrated Gradients Algorithm
reference_image: Mean subtracted reference image of Integrated Gradients Algorithm
pixel_max: the maximum pixel value in the image.
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if sum([
NET.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Model's input dimensions is not Compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if sum([
NET2.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Surrogate model's input dimensions is not Compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
self.num_steps = num_steps
self.reference_image = np.zeros_like(
test_image) if reference_image is None else reference_image
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.create_attack_ops(self.NET2, test_image.shape[-3],
test_image.shape[-2])
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
counterfactuals = self.create_counterfactuals(test_image)
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.debug = topK_loss
NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
NET.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
def create_counterfactuals(self, in_image):
ref_subtracted = in_image - self.reference_image
counterfactuals = np.array([(float(i+1)/self.num_steps) * ref_subtracted + self.reference_image\
for i in range(self.num_steps)])
return np.array(counterfactuals)
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
NET.reference_image:
self.reference_image,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.reference_image: self.reference_image,
NET.label_ph: self.original_label,
self.target_map_ph: self.target_map
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
NET.reference_image:
self.reference_image,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.reference_image: self.reference_image,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
counterfactuals = self.create_counterfactuals(in_image)
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(self.num_steps, w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.NET2.topK_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.NET2.mass_center_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "target":
self.use_target = True
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation,
[self.num_steps, w, h, c])
perturbation_summed = np.sum(np.array([float(i+1)/self.num_steps*perturbation[i]\
for i in range(self.num_steps)]),0)
return np.sign(perturbation_summed)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * np.sign(pert) - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
counterfactuals = self.create_counterfactuals(test_image_pert)
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
counterfactuals, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
counterfactuals, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection"):
"""
Args:
attack_method: One of "mass_center", "topK" or "random"
        epsilon: Allowed maximum $\ell_\infty$ norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
Returns:
        intersection: The number of top K salient pixels of the original picture that are also among the
        top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
self.beta_0 = beta_0
self.beta_1 = beta_1
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
min_criterion = 1.
for counter in range(iters):
# if counter % int(iters / 5) == 0:
# print("Iteration : {}".format(counter))
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
# print(pert.sum())
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
# print("attack")
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
# print("labels is changed")
pass
if min_criterion == 1.:
# print(
# "The attack was not successfull for maximum allowed perturbation size equal to {}"
# .format(epsilon))
# return 1., 1., self.original_confidence, 0.
return None
# print(
# '''For maximum allowed perturbation size equal to {}, the resulting perturbation size was equal to {}
# '''.format(epsilon,
# np.max(np.abs(self.test_image - self.perturbed_image))))
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
counterfactuals = self.create_counterfactuals(self.perturbed_image)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
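# --- Illustration (not part of the original module) --------------------------
# IntegratedGradientsAttack.create_counterfactuals builds the usual
# integrated-gradients interpolation path between a reference image x_ref and
# the input x:  x_i = x_ref + (i + 1) / m * (x - x_ref),  i = 0, ..., m - 1.
# A small standalone NumPy sketch of that path (no TF session required):
def _ig_interpolation_path(x, x_ref, num_steps=100):
    ref_subtracted = x - x_ref
    return np.array([x_ref + (float(i + 1) / num_steps) * ref_subtracted
                     for i in range(num_steps)])
# give_simple_perturbation then re-weights the gradient at the i-th point of
# this path by (i + 1) / num_steps before summing, which is the Riemann-sum
# approximation of the integrated-gradients line integral.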
class SmoothGradientsAttack(object):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
reference_image=None,
target_map=None,
pixel_max=255.):
"""
Args:
        mean_image: The mean image of the data set (the assumption is that the images are mean subtracted)
sess: Session containing model(and surrogate model's) graphs
test_image: Mean subtracted test image
original_label: True label of the image
        NET: Original neural network. It's assumed that NET.saliency is the saliency map tensor and
        NET.saliency_flatten is its flattened version.
        NET2: Surrogate neural network with the same structure and weights as the original network, but
        with activations replaced by the softplus function
        (necessary only when the activation function of the original network
        does not have second-order gradients, e.g. ReLU). It's assumed that NET2.saliency is the
        saliency map tensor and NET2.saliency_flatten is its flattened version.
k_top: the topK parameter of the attack (refer to the original paper)
        num_steps: Number of noisy samples that the gradients are averaged over (SmoothGrad-style)
reference_image: not used
pixel_max: maximum pixel value in the input image
"""
self.pixel_max = pixel_max
if len(test_image.shape) != 3:
raise ValueError("Invalid Test Image Dimensions")
if sum([
NET.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Model's input dimensions is not Compatible with the provided test image!"
)
if self.check_prediction(sess, original_label, test_image, NET):
return
self.sess = sess
self.target_map = target_map
self.create_extra_ops(NET, test_image.shape[-3], test_image.shape[-2],
k_top)
if NET2 is None:
NET2 = NET
else:
self.create_extra_ops(NET2, test_image.shape[-3],
test_image.shape[-2], k_top)
if sum([
NET2.input.get_shape()[-i] != test_image.shape[-i]
for i in [1, 2, 3]
]):
raise ValueError(
"Surrogate model's input dimensions is not Compatible with the provided test image!"
)
self.NET = NET
self.NET2 = NET2
self.test_image = test_image
self.original_label = original_label
self.mean_image = mean_image
self.k_top = k_top
self.num_steps = num_steps
self.reference_image = np.zeros_like(
test_image) if reference_image is None else reference_image
w, h, c = self.mean_image.shape
self.topk_ph = tf.placeholder(tf.float32,
shape=[w * h],
name='topk_ph')
self.mass_center_ph = tf.placeholder(tf.float32,
shape=[2],
name='mass_center_ph')
self.target_map_ph = tf.placeholder(tf.float32,
shape=[w, h],
name='target_map_ph')
self.beta_0_ph = tf.placeholder(tf.float32, name='beta_0')
self.beta_1_ph = tf.placeholder(tf.float32, name='beta_1')
self.original_output = self.NET.predict(test_image[None, :])
_, num_class = self.original_output.shape
self.original_output_ph = tf.placeholder(
tf.float32, shape=[None, num_class],
name='original_output_ph') # only for the manipulation attack
self.create_attack_ops(self.NET2, test_image.shape[-3],
test_image.shape[-2])
self.update_new_image(test_image, original_label)
def update_new_image(self, test_image, original_label, target_map=None):
w, h, c = test_image.shape
self.test_image = test_image
self.original_label = original_label
assert self.check_prediction(self.sess, original_label, test_image,
self.NET) == False
if target_map is not None:
self.target_map = target_map
self.original_output = self.NET2.predict(test_image[None, :])
counterfactuals = self.create_counterfactuals(test_image)
self.saliency1, self.topK = self.run_model(
self.sess, [self.NET.saliency, self.NET.top_idx], counterfactuals,
self.NET)
self.saliency1_flatten = np.reshape(
self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])
elem1 = np.argsort(np.reshape(self.saliency1, [w * h]))[-self.k_top:]
self.elements1 = np.zeros(w * h)
self.elements1[elem1] = 1
self.original_topk = self.elements1
self.mass_center1 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals,
self.NET).astype(int)
self.original_mass_center = self.mass_center1
def check_prediction(self, sess, original_label, image, NET):
""" If the network's prediction is incorrect in the first place, attacking has no meaning."""
predicted_scores = sess.run(
NET.output,
feed_dict={NET.input: image if len(image.shape) == 4 else [image]})
if np.argmax(predicted_scores, 1) != original_label:
print("Network's Prediction is Already Incorrect!")
print("Pred: ", np.argmax(predicted_scores, 1))
print("Label: ", original_label)
return True
else:
self.original_confidence = np.max(predicted_scores)
return False
def create_extra_ops(self, NET, w, h, k_top):
top_val, NET.top_idx = tf.nn.top_k(NET.saliency_flatten, k_top)
y_mesh, x_mesh = np.meshgrid(np.arange(h), np.arange(w))
NET.mass_center = tf.stack([
tf.reduce_sum(NET.saliency * x_mesh) / (w * h),
tf.reduce_sum(NET.saliency * y_mesh) / (w * h)
])
def create_attack_ops(self, NET, w, h):
topK_loss = tf.reduce_sum((NET.saliency_flatten * self.topk_ph))
self.debug = topK_loss
NET.topK_direction = -tf.gradients(topK_loss, NET.input)[0]
mass_center_loss = -tf.reduce_sum(
(NET.mass_center - self.mass_center_ph)**2)
NET.mass_center_direction = -tf.gradients(mass_center_loss,
NET.input)[0]
if self.target_map is not None:
target_dis = tf.keras.losses.MSE(self.target_map_ph, NET.saliency)
output_dis = tf.keras.losses.MSE(self.original_output_ph,
NET.output)
target_loss = tf.reduce_mean(
target_dis) * self.beta_0_ph + self.beta_1_ph * tf.reduce_mean(
output_dis)
self.debug = target_loss
self.target_direction = -tf.gradients(target_loss, NET.input)[0]
    def create_counterfactuals(self, in_image, noise_ratio=0.1):
        # SmoothGrad-style sampling: Gaussian noise whose standard deviation
        # is noise_ratio times the dynamic range of the input image.
        counterfactuals = np.array([
            in_image + np.random.normal(scale=noise_ratio *
                                        (in_image.max() - in_image.min()),
                                        size=in_image.shape)
            for _ in range(self.num_steps)
        ])
return np.array(counterfactuals)
def run_model(self, sess, operation, feed, NET):
if len(feed.shape) == 3:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: [feed],
NET.label_ph: self.original_label,
self.target_map_ph: self.target_map
})
elif len(feed.shape) == 4:
if hasattr(self, "original_topk") and hasattr(
self, "original_mass_center"):
if hasattr(self, "use_target") and self.use_target:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.beta_0_ph: self.beta_0,
self.beta_1_ph: self.beta_1,
self.original_output_ph:
self.original_output,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
self.topk_ph: self.original_topk,
self.mass_center_ph:
self.original_mass_center,
self.target_map_ph: self.target_map
})
else:
return sess.run(operation,
feed_dict={
NET.input: feed,
NET.label_ph: self.original_label,
})
else:
raise RuntimeError("Input image shape invalid!")
def give_simple_perturbation(self, attack_method, in_image):
counterfactuals = self.create_counterfactuals(in_image)
w, h, c = self.test_image.shape
if attack_method == "random":
perturbation = np.random.normal(size=(self.num_steps, w, h, c))
elif attack_method == "topK":
perturbation = self.run_model(self.sess, self.NET2.topK_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "mass_center":
perturbation = self.run_model(self.sess,
self.NET2.mass_center_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation, [self.num_steps, w, h, c])
elif attack_method == "target":
if self.target_map is None:
raise ValueError("No target region determined!")
else:
perturbation = self.run_model(self.sess, self.target_direction,
counterfactuals, self.NET2)
perturbation = np.reshape(perturbation,
[self.num_steps, w, h, c])
perturbation_summed = np.mean(perturbation, 0)
return np.sign(perturbation_summed)
def apply_perturb(self, in_image, pert, alpha, bound=8 / 255, ord=np.inf):
if self.mean_image is None:
self.mean_image = np.zeros_like(in_image)
# out_image = self.test_image + np.clip(
# in_image + alpha * np.sign(pert) - self.test_image, -bound, bound)
d = in_image + alpha * pert - self.test_image
d_norm = np.linalg.norm(d.flatten(), ord=ord)
if d_norm > bound:
proj_ratio = bound / np.linalg.norm(d.flatten(), ord=ord)
else:
proj_ratio = 1
out_image = self.test_image + d * proj_ratio
out_image = np.clip(out_image, -self.mean_image,
self.pixel_max - self.mean_image)
return out_image
def check_measure(self, test_image_pert, measure):
prob = self.run_model(self.sess, self.NET.output, test_image_pert,
self.NET)
if np.argmax(prob, 1) == self.original_label:
counterfactuals = self.create_counterfactuals(test_image_pert)
if measure == "intersection":
top2 = self.run_model(self.sess, self.NET.top_idx,
counterfactuals, self.NET)
criterion = float(len(np.intersect1d(self.topK,
top2))) / self.k_top
elif measure == "correlation":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
counterfactuals, self.NET)
criterion = scipy.stats.spearmanr(self.saliency1_flatten,
saliency2_flatten)[0]
elif measure == "mass_center":
center2 = self.run_model(self.sess, self.NET.mass_center,
counterfactuals, self.NET).astype(int)
criterion = -np.linalg.norm(self.mass_center1 - center2)
elif measure == "cosine":
saliency2_flatten = self.run_model(self.sess,
self.NET.saliency_flatten,
test_image_pert, self.NET)
criterion = scipy.spatial.distance.cosine(
self.saliency1_flatten, saliency2_flatten)
else:
raise ValueError("Invalid measure!")
return criterion
else:
return 1.
def iterative_attack(self,
attack_method,
epsilon,
iters=100,
alpha=1,
beta_0=1e11,
beta_1=1e6,
measure="intersection"):
"""
Args:
attack_method: One of "mass_center", "topK" or "random"
        epsilon: Allowed maximum $\ell_\infty$ norm of the perturbation, e.g. 8
iters: number of maximum allowed attack iterations
alpha: perturbation size in each iteration of the attack
measure: measure for success of the attack (one of "correlation", "mass_center" or "intersection")
Returns:
        intersection: The number of top K salient pixels of the original picture that are also among the
        top K salient pixels of the perturbed image, divided by K
correlation: The rank correlation between saliency maps of original and perturbed image
center_dislocation: The L2 distance between saliency map mass centers in original and perturbed images
confidence: The prediction confidence of the perturbed image
"""
w, h, c = self.test_image.shape
test_image_pert = self.test_image.copy()
self.original = self.test_image.copy()
if attack_method == 'target':
self.use_target = True
else:
self.use_target = False
self.beta_0 = beta_0
self.beta_1 = beta_1
min_criterion = 1.
last_image = None
for counter in range(iters):
pert = self.give_simple_perturbation(attack_method,
test_image_pert)
test_image_pert = self.apply_perturb(test_image_pert, pert, alpha,
epsilon)
criterion = self.check_measure(test_image_pert, measure)
if criterion < min_criterion:
min_criterion = criterion
self.perturbed_image = test_image_pert.copy()
perturb_size = np.max(
np.abs(self.test_image - self.perturbed_image))
else:
pass
if criterion == 1.:
return None
predicted_scores = self.run_model(self.sess, self.NET.output,
self.perturbed_image, self.NET)
confidence = np.max(predicted_scores)
counterfactuals = self.create_counterfactuals(self.perturbed_image)
self.saliency2, self.top2, self.mass_center2= self.run_model\
(self.sess, [self.NET.saliency, self.NET.top_idx, self.NET.mass_center], counterfactuals, self.NET)
correlation = scipy.stats.spearmanr(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))[0]
intersection = float(len(np.intersect1d(self.topK,
self.top2))) / self.k_top
center_dislocation = np.linalg.norm(self.mass_center1 -
self.mass_center2.astype(int))
cos_distance = scipy.spatial.distance.cosine(
self.saliency1_flatten, np.reshape(self.saliency2, [w * h]))
return intersection, correlation, center_dislocation, confidence, perturb_size, cos_distance
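# --- Illustration (not part of the original module) --------------------------
# SmoothGradientsAttack.create_counterfactuals draws num_steps noisy copies of
# the input with Gaussian noise scaled to a fraction of the image's dynamic
# range, and give_simple_perturbation averages the gradients over those copies
# (np.mean over the sample axis).  A standalone sketch of the sampling step:
def _smoothgrad_samples(x, num_steps=100, noise_ratio=0.1):
    scale = noise_ratio * (x.max() - x.min())
    return np.array([x + np.random.normal(scale=scale, size=x.shape)
                     for _ in range(num_steps)])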
class UniGradientsAttack(SmoothGradientsAttack):
def __init__(self,
sess,
mean_image,
test_image,
original_label,
NET,
NET2=None,
k_top=1000,
num_steps=100,
radii=4,
reference_image=None,
target_map=None,
pixel_max=255.):
self.radii = radii / (255. / pixel_max)
        super(UniGradientsAttack,
              self).__init__(sess,
                             mean_image,
                             test_image,
                             original_label,
                             NET,
                             NET2=NET2,
                             k_top=k_top,
                             num_steps=num_steps,
                             reference_image=reference_image,
                             target_map=target_map,
                             pixel_max=pixel_max)
def create_counterfactuals(self, in_image):
counterfactuals = np.array([
in_image +
np.random.uniform(-1, 1, size=in_image.shape) * self.radii
for _ in range(self.num_steps)
])
return np.array(counterfactuals)
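# Note (illustrative comment, not part of the original module):
# UniGradientsAttack only swaps the sampling distribution used for the
# counterfactuals: instead of Gaussian noise it samples uniformly from an
# L_inf ball of radius `radii` around the input (radii is given in 0-255
# pixel units and rescaled to the model's pixel range in __init__), i.e.
#     x + np.random.uniform(-1, 1, size=x.shape) * self.radii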
|
[
"tensorflow.reduce_sum",
"numpy.abs",
"tensorflow.keras.losses.MSE",
"numpy.argmax",
"numpy.clip",
"tensorflow.ConfigProto",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.normal",
"_pickle.load",
"numpy.zeros_like",
"tensorflow.nn.top_k",
"tensorflow.placeholder",
"numpy.max",
"numpy.reshape",
"tensorflow.gradients",
"numpy.intersect1d",
"numpy.ones_like",
"tensorflow.reduce_mean",
"numpy.random.uniform",
"scipy.spatial.distance.cosine",
"scipy.stats.spearmanr",
"numpy.zeros",
"numpy.array",
"numpy.sign"
] |
[((237, 253), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (251, 253), True, 'import tensorflow as tf\n'), ((598, 626), 'numpy.zeros', 'np.zeros', (['(100, 227, 227, 3)'], {}), '((100, 227, 227, 3))\n', (606, 626), True, 'import numpy as np\n'), ((635, 648), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (643, 648), True, 'import numpy as np\n'), ((311, 337), 'numpy.zeros', 'np.zeros', (['(1, 227, 227, 3)'], {}), '((1, 227, 227, 3))\n', (319, 337), True, 'import numpy as np\n'), ((3924, 3981), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w * h]', 'name': '"""topk_ph"""'}), "(tf.float32, shape=[w * h], name='topk_ph')\n", (3938, 3981), True, 'import tensorflow as tf\n'), ((4088, 4148), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[2]', 'name': '"""mass_center_ph"""'}), "(tf.float32, shape=[2], name='mass_center_ph')\n", (4102, 4148), True, 'import tensorflow as tf\n'), ((4268, 4330), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w, h]', 'name': '"""target_map_ph"""'}), "(tf.float32, shape=[w, h], name='target_map_ph')\n", (4282, 4330), True, 'import tensorflow as tf\n'), ((4574, 4652), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_class]', 'name': '"""original_output_ph"""'}), "(tf.float32, shape=[None, num_class], name='original_output_ph')\n", (4588, 4652), True, 'import tensorflow as tf\n'), ((4740, 4781), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_0"""'}), "(tf.float32, name='beta_0')\n", (4754, 4781), True, 'import tensorflow as tf\n'), ((4807, 4848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_1"""'}), "(tf.float32, name='beta_1')\n", (4821, 4848), True, 'import tensorflow as tf\n'), ((5682, 5755), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[test_image.shape[-3] * test_image.shape[-2]]'], {}), '(self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])\n', (5692, 5755), True, 'import numpy as np\n'), ((5872, 5887), 'numpy.zeros', 'np.zeros', (['(w * h)'], {}), '(w * h)\n', (5880, 5887), True, 'import numpy as np\n'), ((6866, 6906), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['NET.saliency_flatten', 'k_top'], {}), '(NET.saliency_flatten, k_top)\n', (6877, 6906), True, 'import tensorflow as tf\n'), ((7204, 7254), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency_flatten * self.topk_ph)'], {}), '(NET.saliency_flatten * self.topk_ph)\n', (7217, 7254), True, 'import tensorflow as tf\n'), ((13282, 13303), 'numpy.sign', 'np.sign', (['perturbation'], {}), '(perturbation)\n', (13289, 13303), True, 'import numpy as np\n'), ((13932, 14002), 'numpy.clip', 'np.clip', (['out_image', '(-self.mean_image)', '(self.pixel_max - self.mean_image)'], {}), '(out_image, -self.mean_image, self.pixel_max - self.mean_image)\n', (13939, 14002), True, 'import numpy as np\n'), ((18257, 18281), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (18263, 18281), True, 'import numpy as np\n'), ((22283, 22340), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w * h]', 'name': '"""topk_ph"""'}), "(tf.float32, shape=[w * h], name='topk_ph')\n", (22297, 22340), True, 'import tensorflow as tf\n'), ((22447, 22507), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[2]', 'name': '"""mass_center_ph"""'}), "(tf.float32, shape=[2], name='mass_center_ph')\n", (22461, 22507), True, 'import tensorflow as tf\n'), ((22627, 22689), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w, h]', 'name': '"""target_map_ph"""'}), "(tf.float32, shape=[w, h], name='target_map_ph')\n", (22641, 22689), True, 'import tensorflow as tf\n'), ((22803, 22844), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_0"""'}), "(tf.float32, name='beta_0')\n", (22817, 22844), True, 'import tensorflow as tf\n'), ((22870, 22911), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_1"""'}), "(tf.float32, name='beta_1')\n", (22884, 22911), True, 'import tensorflow as tf\n'), ((23066, 23144), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_class]', 'name': '"""original_output_ph"""'}), "(tf.float32, shape=[None, num_class], name='original_output_ph')\n", (23080, 23144), True, 'import tensorflow as tf\n'), ((24615, 24688), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[test_image.shape[-3] * test_image.shape[-2]]'], {}), '(self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])\n', (24625, 24688), True, 'import numpy as np\n'), ((24805, 24820), 'numpy.zeros', 'np.zeros', (['(w * h)'], {}), '(w * h)\n', (24813, 24820), True, 'import numpy as np\n'), ((25238, 25278), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['NET.saliency_flatten', 'k_top'], {}), '(NET.saliency_flatten, k_top)\n', (25249, 25278), True, 'import tensorflow as tf\n'), ((25576, 25626), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency_flatten * self.topk_ph)'], {}), '(NET.saliency_flatten * self.topk_ph)\n', (25589, 25626), True, 'import tensorflow as tf\n'), ((26771, 26796), 'numpy.array', 'np.array', (['counterfactuals'], {}), '(counterfactuals)\n', (26779, 26796), True, 'import numpy as np\n'), ((32895, 32923), 'numpy.sign', 'np.sign', (['perturbation_summed'], {}), '(perturbation_summed)\n', (32902, 32923), True, 'import numpy as np\n'), ((33552, 33622), 'numpy.clip', 'np.clip', (['out_image', '(-self.mean_image)', '(self.pixel_max - self.mean_image)'], {}), '(out_image, -self.mean_image, self.pixel_max - self.mean_image)\n', (33559, 33622), True, 'import numpy as np\n'), ((38426, 38450), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (38432, 38450), True, 'import numpy as np\n'), ((42463, 42520), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w * h]', 'name': '"""topk_ph"""'}), "(tf.float32, shape=[w * h], name='topk_ph')\n", (42477, 42520), True, 'import tensorflow as tf\n'), ((42627, 42687), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[2]', 'name': '"""mass_center_ph"""'}), "(tf.float32, shape=[2], name='mass_center_ph')\n", (42641, 42687), True, 'import tensorflow as tf\n'), ((42807, 42869), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[w, h]', 'name': '"""target_map_ph"""'}), "(tf.float32, shape=[w, h], name='target_map_ph')\n", (42821, 42869), True, 'import tensorflow as tf\n'), ((42983, 43024), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_0"""'}), "(tf.float32, name='beta_0')\n", (42997, 43024), True, 'import tensorflow as tf\n'), ((43050, 43091), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta_1"""'}), "(tf.float32, name='beta_1')\n", (43064, 43091), True, 'import tensorflow as tf\n'), ((43246, 43324), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_class]', 'name': '"""original_output_ph"""'}), "(tf.float32, shape=[None, num_class], 
name='original_output_ph')\n", (43260, 43324), True, 'import tensorflow as tf\n'), ((44291, 44364), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[test_image.shape[-3] * test_image.shape[-2]]'], {}), '(self.saliency1, [test_image.shape[-3] * test_image.shape[-2]])\n', (44301, 44364), True, 'import numpy as np\n'), ((44481, 44496), 'numpy.zeros', 'np.zeros', (['(w * h)'], {}), '(w * h)\n', (44489, 44496), True, 'import numpy as np\n'), ((45581, 45621), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['NET.saliency_flatten', 'k_top'], {}), '(NET.saliency_flatten, k_top)\n', (45592, 45621), True, 'import tensorflow as tf\n'), ((45919, 45969), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency_flatten * self.topk_ph)'], {}), '(NET.saliency_flatten * self.topk_ph)\n', (45932, 45969), True, 'import tensorflow as tf\n'), ((47179, 47204), 'numpy.array', 'np.array', (['counterfactuals'], {}), '(counterfactuals)\n', (47187, 47204), True, 'import numpy as np\n'), ((52460, 52484), 'numpy.mean', 'np.mean', (['perturbation', '(0)'], {}), '(perturbation, 0)\n', (52467, 52484), True, 'import numpy as np\n'), ((52500, 52528), 'numpy.sign', 'np.sign', (['perturbation_summed'], {}), '(perturbation_summed)\n', (52507, 52528), True, 'import numpy as np\n'), ((53148, 53218), 'numpy.clip', 'np.clip', (['out_image', '(-self.mean_image)', '(self.pixel_max - self.mean_image)'], {}), '(out_image, -self.mean_image, self.pixel_max - self.mean_image)\n', (53155, 53218), True, 'import numpy as np\n'), ((57542, 57566), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (57548, 57566), True, 'import numpy as np\n'), ((59693, 59718), 'numpy.array', 'np.array', (['counterfactuals'], {}), '(counterfactuals)\n', (59701, 59718), True, 'import numpy as np\n'), ((836, 852), '_pickle.load', 'pkl.load', (['inputs'], {}), '(inputs)\n', (844, 852), True, 'import _pickle as pkl\n'), ((6542, 6572), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (6551, 6572), True, 'import numpy as np\n'), ((6733, 6757), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (6739, 6757), True, 'import numpy as np\n'), ((6944, 6956), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (6953, 6956), True, 'import numpy as np\n'), ((6958, 6970), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (6967, 6970), True, 'import numpy as np\n'), ((7355, 7414), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((NET.mass_center - self.mass_center_ph) ** 2)'], {}), '((NET.mass_center - self.mass_center_ph) ** 2)\n', (7368, 7414), True, 'import tensorflow as tf\n'), ((7625, 7678), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.target_map_ph', 'NET.saliency'], {}), '(self.target_map_ph, NET.saliency)\n', (7644, 7678), True, 'import tensorflow as tf\n'), ((7704, 7760), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.original_output_ph', 'NET.output'], {}), '(self.original_output_ph, NET.output)\n', (7723, 7760), True, 'import tensorflow as tf\n'), ((12130, 12162), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(w, h, c)'}), '(size=(w, h, c))\n', (12146, 12162), True, 'import numpy as np\n'), ((13450, 13473), 'numpy.zeros_like', 'np.zeros_like', (['in_image'], {}), '(in_image)\n', (13463, 13473), True, 'import numpy as np\n'), ((14239, 14257), 'numpy.argmax', 'np.argmax', (['prob', '(1)'], {}), '(prob, 1)\n', (14248, 14257), True, 'import numpy as np\n'), ((18948, 18983), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), 
'(self.saliency2, [w * h])\n', (18958, 18983), True, 'import numpy as np\n'), ((22132, 22157), 'numpy.zeros_like', 'np.zeros_like', (['test_image'], {}), '(test_image)\n', (22145, 22157), True, 'import numpy as np\n'), ((23645, 23675), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (23654, 23675), True, 'import numpy as np\n'), ((23836, 23860), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (23842, 23860), True, 'import numpy as np\n'), ((25316, 25328), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (25325, 25328), True, 'import numpy as np\n'), ((25330, 25342), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (25339, 25342), True, 'import numpy as np\n'), ((25757, 25816), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((NET.mass_center - self.mass_center_ph) ** 2)'], {}), '((NET.mass_center - self.mass_center_ph) ** 2)\n', (25770, 25816), True, 'import tensorflow as tf\n'), ((26025, 26078), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.target_map_ph', 'NET.saliency'], {}), '(self.target_map_ph, NET.saliency)\n', (26044, 26078), True, 'import tensorflow as tf\n'), ((26104, 26160), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.original_output_ph', 'NET.output'], {}), '(self.original_output_ph, NET.output)\n', (26123, 26160), True, 'import tensorflow as tf\n'), ((31593, 31641), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.num_steps, w, h, c)'}), '(size=(self.num_steps, w, h, c))\n', (31609, 31641), True, 'import numpy as np\n'), ((33070, 33093), 'numpy.zeros_like', 'np.zeros_like', (['in_image'], {}), '(in_image)\n', (33083, 33093), True, 'import numpy as np\n'), ((33859, 33877), 'numpy.argmax', 'np.argmax', (['prob', '(1)'], {}), '(prob, 1)\n', (33868, 33877), True, 'import numpy as np\n'), ((39188, 39223), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (39198, 39223), True, 'import numpy as np\n'), ((42312, 42337), 'numpy.zeros_like', 'np.zeros_like', (['test_image'], {}), '(test_image)\n', (42325, 42337), True, 'import numpy as np\n'), ((45152, 45182), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (45161, 45182), True, 'import numpy as np\n'), ((45448, 45472), 'numpy.max', 'np.max', (['predicted_scores'], {}), '(predicted_scores)\n', (45454, 45472), True, 'import numpy as np\n'), ((45659, 45671), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (45668, 45671), True, 'import numpy as np\n'), ((45673, 45685), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (45682, 45685), True, 'import numpy as np\n'), ((46100, 46159), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((NET.mass_center - self.mass_center_ph) ** 2)'], {}), '((NET.mass_center - self.mass_center_ph) ** 2)\n', (46113, 46159), True, 'import tensorflow as tf\n'), ((46368, 46421), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.target_map_ph', 'NET.saliency'], {}), '(self.target_map_ph, NET.saliency)\n', (46387, 46421), True, 'import tensorflow as tf\n'), ((46447, 46503), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['self.original_output_ph', 'NET.output'], {}), '(self.original_output_ph, NET.output)\n', (46466, 46503), True, 'import tensorflow as tf\n'), ((51351, 51399), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.num_steps, w, h, c)'}), '(size=(self.num_steps, w, h, c))\n', (51367, 51399), True, 'import numpy as np\n'), ((52675, 52698), 'numpy.zeros_like', 
'np.zeros_like', (['in_image'], {}), '(in_image)\n', (52688, 52698), True, 'import numpy as np\n'), ((53455, 53473), 'numpy.argmax', 'np.argmax', (['prob', '(1)'], {}), '(prob, 1)\n', (53464, 53473), True, 'import numpy as np\n'), ((58304, 58339), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (58314, 58339), True, 'import numpy as np\n'), ((513, 537), 'numpy.ones_like', 'np.ones_like', (['MEAN_IMAGE'], {}), '(MEAN_IMAGE)\n', (525, 537), True, 'import numpy as np\n'), ((5796, 5831), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[w * h]'], {}), '(self.saliency1, [w * h])\n', (5806, 5831), True, 'import numpy as np\n'), ((7288, 7322), 'tensorflow.gradients', 'tf.gradients', (['topK_loss', 'NET.input'], {}), '(topK_loss, NET.input)\n', (7300, 7322), True, 'import tensorflow as tf\n'), ((7464, 7505), 'tensorflow.gradients', 'tf.gradients', (['mass_center_loss', 'NET.input'], {}), '(mass_center_loss, NET.input)\n', (7476, 7505), True, 'import tensorflow as tf\n'), ((12365, 12400), 'numpy.reshape', 'np.reshape', (['perturbation', '[w, h, c]'], {}), '(perturbation, [w, h, c])\n', (12375, 12400), True, 'import numpy as np\n'), ((18546, 18581), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (18556, 18581), True, 'import numpy as np\n'), ((24729, 24764), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[w * h]'], {}), '(self.saliency1, [w * h])\n', (24739, 24764), True, 'import numpy as np\n'), ((25690, 25724), 'tensorflow.gradients', 'tf.gradients', (['topK_loss', 'NET.input'], {}), '(topK_loss, NET.input)\n', (25702, 25724), True, 'import tensorflow as tf\n'), ((25865, 25906), 'tensorflow.gradients', 'tf.gradients', (['mass_center_loss', 'NET.input'], {}), '(mass_center_loss, NET.input)\n', (25877, 25906), True, 'import tensorflow as tf\n'), ((31856, 31907), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (31866, 31907), True, 'import numpy as np\n'), ((38786, 38821), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (38796, 38821), True, 'import numpy as np\n'), ((44405, 44440), 'numpy.reshape', 'np.reshape', (['self.saliency1', '[w * h]'], {}), '(self.saliency1, [w * h])\n', (44415, 44440), True, 'import numpy as np\n'), ((45294, 45324), 'numpy.argmax', 'np.argmax', (['predicted_scores', '(1)'], {}), '(predicted_scores, 1)\n', (45303, 45324), True, 'import numpy as np\n'), ((46033, 46067), 'tensorflow.gradients', 'tf.gradients', (['topK_loss', 'NET.input'], {}), '(topK_loss, NET.input)\n', (46045, 46067), True, 'import tensorflow as tf\n'), ((46208, 46249), 'tensorflow.gradients', 'tf.gradients', (['mass_center_loss', 'NET.input'], {}), '(mass_center_loss, NET.input)\n', (46220, 46249), True, 'import tensorflow as tf\n'), ((51614, 51665), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (51624, 51665), True, 'import numpy as np\n'), ((57902, 57937), 'numpy.reshape', 'np.reshape', (['self.saliency2', '[w * h]'], {}), '(self.saliency2, [w * h])\n', (57912, 57937), True, 'import numpy as np\n'), ((7021, 7057), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * x_mesh)'], {}), '(NET.saliency * x_mesh)\n', (7034, 7057), True, 'import tensorflow as tf\n'), ((7081, 7117), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * y_mesh)'], {}), '(NET.saliency * 
y_mesh)\n', (7094, 7117), True, 'import tensorflow as tf\n'), ((7832, 7858), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['target_dis'], {}), '(target_dis)\n', (7846, 7858), True, 'import tensorflow as tf\n'), ((7912, 7938), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output_dis'], {}), '(output_dis)\n', (7926, 7938), True, 'import tensorflow as tf\n'), ((8035, 8071), 'tensorflow.gradients', 'tf.gradients', (['target_loss', 'NET.input'], {}), '(target_loss, NET.input)\n', (8047, 8071), True, 'import tensorflow as tf\n'), ((12659, 12694), 'numpy.reshape', 'np.reshape', (['perturbation', '[w, h, c]'], {}), '(perturbation, [w, h, c])\n', (12669, 12694), True, 'import numpy as np\n'), ((13635, 13648), 'numpy.sign', 'np.sign', (['pert'], {}), '(pert)\n', (13642, 13648), True, 'import numpy as np\n'), ((17952, 17998), 'numpy.abs', 'np.abs', (['(self.test_image - self.perturbed_image)'], {}), '(self.test_image - self.perturbed_image)\n', (17958, 17998), True, 'import numpy as np\n'), ((18619, 18655), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'self.top2'], {}), '(self.topK, self.top2)\n', (18633, 18655), True, 'import numpy as np\n'), ((25393, 25429), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * x_mesh)'], {}), '(NET.saliency * x_mesh)\n', (25406, 25429), True, 'import tensorflow as tf\n'), ((25453, 25489), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * y_mesh)'], {}), '(NET.saliency * y_mesh)\n', (25466, 25489), True, 'import tensorflow as tf\n'), ((26232, 26258), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['target_dis'], {}), '(target_dis)\n', (26246, 26258), True, 'import tensorflow as tf\n'), ((26312, 26338), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output_dis'], {}), '(output_dis)\n', (26326, 26338), True, 'import tensorflow as tf\n'), ((26435, 26471), 'tensorflow.gradients', 'tf.gradients', (['target_loss', 'NET.input'], {}), '(target_loss, NET.input)\n', (26447, 26471), True, 'import tensorflow as tf\n'), ((32178, 32229), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (32188, 32229), True, 'import numpy as np\n'), ((33255, 33268), 'numpy.sign', 'np.sign', (['pert'], {}), '(pert)\n', (33262, 33268), True, 'import numpy as np\n'), ((37615, 37661), 'numpy.abs', 'np.abs', (['(self.test_image - self.perturbed_image)'], {}), '(self.test_image - self.perturbed_image)\n', (37621, 37661), True, 'import numpy as np\n'), ((38859, 38895), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'self.top2'], {}), '(self.topK, self.top2)\n', (38873, 38895), True, 'import numpy as np\n'), ((45736, 45772), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * x_mesh)'], {}), '(NET.saliency * x_mesh)\n', (45749, 45772), True, 'import tensorflow as tf\n'), ((45796, 45832), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(NET.saliency * y_mesh)'], {}), '(NET.saliency * y_mesh)\n', (45809, 45832), True, 'import tensorflow as tf\n'), ((46575, 46601), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['target_dis'], {}), '(target_dis)\n', (46589, 46601), True, 'import tensorflow as tf\n'), ((46655, 46681), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['output_dis'], {}), '(output_dis)\n', (46669, 46681), True, 'import tensorflow as tf\n'), ((46778, 46814), 'tensorflow.gradients', 'tf.gradients', (['target_loss', 'NET.input'], {}), '(target_loss, NET.input)\n', (46790, 46814), True, 'import tensorflow as tf\n'), ((51936, 51987), 'numpy.reshape', 'np.reshape', (['perturbation', 
'[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (51946, 51987), True, 'import numpy as np\n'), ((57236, 57282), 'numpy.abs', 'np.abs', (['(self.test_image - self.perturbed_image)'], {}), '(self.test_image - self.perturbed_image)\n', (57242, 57282), True, 'import numpy as np\n'), ((57975, 58011), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'self.top2'], {}), '(self.topK, self.top2)\n', (57989, 58011), True, 'import numpy as np\n'), ((14883, 14947), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (14904, 14947), False, 'import scipy\n'), ((34578, 34642), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (34599, 34642), False, 'import scipy\n'), ((54174, 54238), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (54195, 54238), False, 'import scipy\n'), ((59564, 59609), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'in_image.shape'}), '(-1, 1, size=in_image.shape)\n', (59581, 59609), True, 'import numpy as np\n'), ((13231, 13266), 'numpy.reshape', 'np.reshape', (['perturbation', '[w, h, c]'], {}), '(perturbation, [w, h, c])\n', (13241, 13266), True, 'import numpy as np\n'), ((14494, 14525), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'top2'], {}), '(self.topK, top2)\n', (14508, 14525), True, 'import numpy as np\n'), ((15227, 15270), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mass_center1 - center2)'], {}), '(self.mass_center1 - center2)\n', (15241, 15270), True, 'import numpy as np\n'), ((15556, 15628), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (15585, 15628), False, 'import scipy\n'), ((32613, 32664), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (32623, 32664), True, 'import numpy as np\n'), ((34189, 34220), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'top2'], {}), '(self.topK, top2)\n', (34203, 34220), True, 'import numpy as np\n'), ((34922, 34965), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mass_center1 - center2)'], {}), '(self.mass_center1 - center2)\n', (34936, 34965), True, 'import numpy as np\n'), ((35251, 35323), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (35280, 35323), False, 'import scipy\n'), ((52336, 52387), 'numpy.reshape', 'np.reshape', (['perturbation', '[self.num_steps, w, h, c]'], {}), '(perturbation, [self.num_steps, w, h, c])\n', (52346, 52387), True, 'import numpy as np\n'), ((53785, 53816), 'numpy.intersect1d', 'np.intersect1d', (['self.topK', 'top2'], {}), '(self.topK, top2)\n', (53799, 53816), True, 'import numpy as np\n'), ((54518, 54561), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mass_center1 - center2)'], {}), '(self.mass_center1 - center2)\n', (54532, 54561), True, 'import numpy as np\n'), ((54847, 54919), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['self.saliency1_flatten', 'saliency2_flatten'], {}), '(self.saliency1_flatten, saliency2_flatten)\n', (54876, 54919), False, 'import scipy\n')]
|
"""Implementation of the setuptools command 'pyecore'."""
import collections
import contextlib
import distutils.log as logger
import logging
import pathlib
import shlex
import pyecore.resources
import pyecoregen.ecore
import setuptools
class PyEcoreCommand(setuptools.Command):
"""A setuptools command for generating Python code from Ecore models.
An extra command for setuptools to generate static Python classes from Ecore models. The pyecore
command wraps pyecoregen - the real Python code generator for Ecore models. It searches for
Ecore models starting from the base directory and generates a Python package for each found
Ecore model.
:cvar _ECORE_FILE_EXT: File extension of Ecore XMI file
:cvar description: Description of ecore command
:cvar user_options: Options which can be passed by the user
:cvar boolean_options: Subset of user options which are binary
"""
_ECORE_FILE_EXT = 'ecore'
description = 'generate Python code from Ecore models'
user_options = [
('ecore-models=', 'e', 'specify Ecore models to generate code for'),
('output=', 'o', 'specify directories where output is generated'),
('user-modules=', None, 'dotted names of modules with user-provided mixins to import from '
'generated classes'),
('auto-register-package', None, 'Generate package auto-registration for the PyEcore '
'\'global_registry\''),
]
boolean_options = ['auto-register-package']
def initialize_options(self):
"""Set default values for all the options that this command supports. Note that these
defaults may be overridden by other commands, by the setup script, by config files, or by
the command-line.
"""
self.ecore_models = None
self.output = ''
self.user_modules = ''
self.auto_register_package = 0
def finalize_options(self):
"""Set final values for all the options that this command supports. This is always called
as late as possible, ie. after any option assignments from the command-line or from other
commands have been done.
"""
# parse ecore-models option
if self.ecore_models:
self.ecore_models = shlex.split(self.ecore_models, comments=True)
# parse output option
tokens = shlex.split(self.output, comments=True)
self.output = collections.defaultdict(lambda: None)
for token in tokens:
model, output = token.split('=', 1)
# check if model and output are specified
if model and output:
# add relative output path to dictionary
output_path = pathlib.Path(output).relative_to('.')
if model == 'default':
self.output.default_factory = lambda: output_path
else:
self.output[model] = output_path
else:
                logger.warn('Ignoring invalid output specifier %r.', token)
# parse user-modules option
tokens = shlex.split(self.user_modules, comments=True)
self.user_modules = {}
for token in tokens:
model, user_module = token.split('=', 1)
# check if model and user module are specified
if model and user_module:
self.user_modules[model] = user_module
else:
                logger.warn('Ignoring invalid user module specifier %r.', token)
def _configure_logging(self):
"""Configure logging using global verbosity level of distutils."""
loglevel_map = collections.defaultdict(lambda: logging.WARNING)
loglevel_map.update({
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
})
logging.basicConfig(
format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
level=loglevel_map[self.distribution.verbose]
)
def _find_ecore_xmi_files(self, base_path=pathlib.Path('.')):
"""Search for all Ecore XMI files starting from base directory and returns a list of them.
:param base_path: base path to search for Ecore XMI files
:return: a list of all found Ecore XMI files
"""
pattern = '*.{}'.format(self._ECORE_FILE_EXT)
logger.debug('searching for Ecore XMI files in \'{!s}\''.format(str(base_path)))
return sorted(base_path.rglob(pattern))
@staticmethod
@contextlib.contextmanager
def _load_ecore_model(ecore_model_path):
"""Load a single Ecore model from a Ecore XMI file and return the root package.
:param ecore_model_path: path to Ecore XMI file
:return: root package of the Ecore model
"""
rset = pyecore.resources.ResourceSet()
try:
logger.debug('loading \'{!s}\''.format(str(ecore_model_path)))
resource = rset.get_resource(ecore_model_path.as_posix())
yield resource.contents[0]
except Exception:
raise
else:
rset.remove_resource(resource)
def run(self):
"""Perform all tasks necessary to generate Python packages representing the classes from
Ecore models. This process is controlled by the user options passed on the command line or
set internally to default values.
"""
self._configure_logging()
# find Ecore XMI files
ecore_xmi_files = self._find_ecore_xmi_files()
# load each Ecore model
for ecore_xmi_file in ecore_xmi_files:
with self._load_ecore_model(ecore_xmi_file) as resource:
if self.ecore_models is None or resource.name in self.ecore_models:
# configure EcoreGenerator
kwargs = {}
if self.auto_register_package:
kwargs['auto_register_package'] = True
if resource.name in self.user_modules:
kwargs['user_module'] = self.user_modules[resource.name]
if self.output[resource.name]:
output_dir = self.output[resource.name]
else:
output_dir = ecore_xmi_file.parent
# generate Python classes
logger.info(
'running pyecoregen to generate code for {!r} metamodel'.format(resource.name)
)
pyecoregen.ecore.EcoreGenerator(**kwargs).generate(
resource,
output_dir.as_posix()
)
else:
logger.debug('skipping {!r} metamodel'.format(resource.name))
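# Usage sketch (illustrative addition, not part of the original module): a project
# shipping Ecore models could expose this command via ``cmdclass`` in its setup.py;
# the package name below is hypothetical.
#
#     from setuptools import setup
#     setup(
#         name='example-ecore-models',
#         cmdclass={'pyecore': PyEcoreCommand},
#     )
#
# after which ``python setup.py pyecore --auto-register-package`` would generate a
# Python package next to each .ecore file found under the base directory.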
|
[
"distutils.log.warn",
"logging.basicConfig",
"shlex.split",
"collections.defaultdict",
"pathlib.Path"
] |
[((2409, 2448), 'shlex.split', 'shlex.split', (['self.output'], {'comments': '(True)'}), '(self.output, comments=True)\n', (2420, 2448), False, 'import shlex\n'), ((2471, 2509), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (2494, 2509), False, 'import collections\n'), ((3133, 3178), 'shlex.split', 'shlex.split', (['self.user_modules'], {'comments': '(True)'}), '(self.user_modules, comments=True)\n', (3144, 3178), False, 'import shlex\n'), ((3679, 3728), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : logging.WARNING)'], {}), '(lambda : logging.WARNING)\n', (3702, 3728), False, 'import collections\n'), ((3867, 4002), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s [%(name)s] %(message)s"""', 'level': 'loglevel_map[self.distribution.verbose]'}), "(format=\n '%(asctime)s %(levelname)s [%(name)s] %(message)s', level=loglevel_map[\n self.distribution.verbose])\n", (3886, 4002), False, 'import logging\n'), ((4074, 4091), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (4086, 4091), False, 'import pathlib\n'), ((2315, 2360), 'shlex.split', 'shlex.split', (['self.ecore_models'], {'comments': '(True)'}), '(self.ecore_models, comments=True)\n', (2326, 2360), False, 'import shlex\n'), ((3017, 3078), 'distutils.log.warn', 'logger.warn', (['"""Ignoring invalid output specifier {!r}."""', 'token'], {}), "('Ignoring invalid output specifier {!r}.', token)\n", (3028, 3078), True, 'import distutils.log as logger\n'), ((3479, 3545), 'distutils.log.warn', 'logger.warn', (['"""Ignoring invalid user module specifier {!r}."""', 'token'], {}), "('Ignoring invalid user module specifier {!r}.', token)\n", (3490, 3545), True, 'import distutils.log as logger\n'), ((2761, 2781), 'pathlib.Path', 'pathlib.Path', (['output'], {}), '(output)\n', (2773, 2781), False, 'import pathlib\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 15:05:24 2018
@author: Hendry
"""
from read_data import *
from TokenizeSentences import *
import numpy as np
def onehot(data,nClass):
data2 = np.zeros([len(data),nClass])
for i in range(nClass):
data2[np.where(data==i),i]= 1
return data2
def get_text_idx(text,vocab,max_document_length):
text_array = np.zeros([len(text), max_document_length],dtype=np.int32)
for i,x in enumerate(text):
words = x
for j, w in enumerate(words):
if w in vocab:
text_array[i, j] = vocab[w]
else :
text_array[i, j] = vocab['the']
return text_array
def loaddata(w2v_model,typeOfClassify = 0,useTextsum= 1):
train_bodies = readRawData('train_bodies.csv')
if useTextsum == 0:
trainDocs = TokenizeSentences(splitData(train_bodies,1))
else:
f = open('./fnc_data/train_1.txt','r')
data = f.readlines()
f.close()
trainDocs = TokenizeSentences(data)
trainDocsIdx = np.array(splitData(train_bodies,0)).astype('int')
train_stances = readRawData('train_stances.csv')
trainTitle = TokenizeSentences(splitData(train_stances,0))
trainTitleIdx = np.array(splitData(train_stances,1)).astype('int')
trainRes = np.array(splitData(train_stances,2))
trainRes[np.where(trainRes=='unrelated')]='0'
trainRes[np.where(trainRes=='agree')]='1'
trainRes[np.where(trainRes=='disagree')]='2'
trainRes[np.where(trainRes=='discuss')]='3'
trainRes =trainRes.astype('int')
maxDocLength = 0
for i in range(len(trainDocs)):
maxDocLength = max(maxDocLength,len(trainDocs[i]))
maxTitleLength = 0
for i in range(len(trainTitle)):
maxTitleLength = max(maxTitleLength,len(trainTitle[i]))
trainDocs = get_text_idx(trainDocs,w2v_model.vocab_hash,maxDocLength)
trainTitle = get_text_idx(trainTitle,w2v_model.vocab_hash,maxTitleLength)
trainTitleDocs = [[] for i in range(len(trainTitle))]
for i in range(len(trainTitle)):
idx = np.where(trainDocsIdx==trainTitleIdx[i])
trainTitleDocs[i]=trainDocs[int(idx[0])]
trainTitleDocs = np.array(trainTitleDocs)
trainDocs = np.array(trainDocs)
trainTitle = np.array(trainTitle)
uniIdx = np.unique(trainTitleIdx)
uniIdxTest = uniIdx[round(0.95*len(uniIdx)):]
validIdx = np.argwhere(trainTitleIdx == uniIdxTest[0])
for i in range(len(uniIdxTest)-1):
validIdx = np.append(validIdx,np.argwhere(trainTitleIdx == uniIdxTest[i+1]))
validIdx = sorted(validIdx)
fullIdx = list(range(len(trainTitleIdx)))
trainIdx = list(set(fullIdx).difference(set(validIdx)))
x1Train = trainTitleDocs[trainIdx]
x2Train = trainTitle[trainIdx]
trainRes = np.array(trainRes)
y0Train = trainRes[trainIdx]
x1Valid = trainTitleDocs[validIdx]
x2Valid = trainTitle[validIdx]
y0Valid = trainRes[validIdx]
if typeOfClassify==0:
yValid = onehot(y0Valid,4)
yTrain = onehot(y0Train,4)
elif typeOfClassify==1:
y0Train[y0Train>0]=1
y0Valid[y0Valid>0]=1
yValid = onehot(y0Valid,2)
yTrain = onehot(y0Train,2)
elif typeOfClassify==2:
x1Train = x1Train[y0Train>0]
x2Train = x2Train[y0Train>0]
y0Train = y0Train[y0Train>0]-1
x1Valid = x1Valid[y0Valid>0]
x2Valid = x2Valid[y0Valid>0]
y0Valid = y0Valid[y0Valid>0]-1
yValid = onehot(y0Valid,3)
yTrain = onehot(y0Train,3)
vocab_size = len(w2v_model.vocab_hash)
return x1Train, x1Valid, x2Train, x2Valid, yTrain, yValid, vocab_size
|
[
"numpy.argwhere",
"numpy.where",
"numpy.array",
"numpy.unique"
] |
[((2079, 2103), 'numpy.array', 'np.array', (['trainTitleDocs'], {}), '(trainTitleDocs)\n', (2087, 2103), True, 'import numpy as np\n'), ((2117, 2136), 'numpy.array', 'np.array', (['trainDocs'], {}), '(trainDocs)\n', (2125, 2136), True, 'import numpy as np\n'), ((2151, 2171), 'numpy.array', 'np.array', (['trainTitle'], {}), '(trainTitle)\n', (2159, 2171), True, 'import numpy as np\n'), ((2182, 2206), 'numpy.unique', 'np.unique', (['trainTitleIdx'], {}), '(trainTitleIdx)\n', (2191, 2206), True, 'import numpy as np\n'), ((2267, 2310), 'numpy.argwhere', 'np.argwhere', (['(trainTitleIdx == uniIdxTest[0])'], {}), '(trainTitleIdx == uniIdxTest[0])\n', (2278, 2310), True, 'import numpy as np\n'), ((2639, 2657), 'numpy.array', 'np.array', (['trainRes'], {}), '(trainRes)\n', (2647, 2657), True, 'import numpy as np\n'), ((1307, 1340), 'numpy.where', 'np.where', (["(trainRes == 'unrelated')"], {}), "(trainRes == 'unrelated')\n", (1315, 1340), True, 'import numpy as np\n'), ((1354, 1383), 'numpy.where', 'np.where', (["(trainRes == 'agree')"], {}), "(trainRes == 'agree')\n", (1362, 1383), True, 'import numpy as np\n'), ((1397, 1429), 'numpy.where', 'np.where', (["(trainRes == 'disagree')"], {}), "(trainRes == 'disagree')\n", (1405, 1429), True, 'import numpy as np\n'), ((1443, 1474), 'numpy.where', 'np.where', (["(trainRes == 'discuss')"], {}), "(trainRes == 'discuss')\n", (1451, 1474), True, 'import numpy as np\n'), ((1974, 2016), 'numpy.where', 'np.where', (['(trainDocsIdx == trainTitleIdx[i])'], {}), '(trainDocsIdx == trainTitleIdx[i])\n', (1982, 2016), True, 'import numpy as np\n'), ((2382, 2429), 'numpy.argwhere', 'np.argwhere', (['(trainTitleIdx == uniIdxTest[i + 1])'], {}), '(trainTitleIdx == uniIdxTest[i + 1])\n', (2393, 2429), True, 'import numpy as np\n'), ((280, 299), 'numpy.where', 'np.where', (['(data == i)'], {}), '(data == i)\n', (288, 299), True, 'import numpy as np\n')]
|
import os
import sys
from pathlib import Path
sys.path.insert(0, os.path.abspath('..'))
import googlemaps_helpers
ROOT = Path('.')
DATA_DIR = Path('tests/data')
|
[
"pathlib.Path",
"os.path.abspath"
] |
[((123, 132), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (127, 132), False, 'from pathlib import Path\n'), ((144, 162), 'pathlib.Path', 'Path', (['"""tests/data"""'], {}), "('tests/data')\n", (148, 162), False, 'from pathlib import Path\n'), ((66, 87), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (81, 87), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the base classes used
when defining mutation strategies for pfp
"""
import glob
import os
import six
get_strategy = None
StratGroup = None
FieldStrat = None
def init():
global get_strategy
global StratGroup
global FieldStrat
import pfp.fuzz.strats
get_strategy = pfp.fuzz.strats.get_strategy
StratGroup = pfp.fuzz.strats.StratGroup
FieldStrat = pfp.fuzz.strats.FieldStrat
# load all of the built-in strategies
for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), "*.py")):
filename = os.path.basename(strat_file)
if filename in ["__init__.py", "base.py"]:
continue
mod_name = filename.replace(".py", "").replace(".pyc", "")
__import__("pfp.fuzz." + mod_name)
def mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False):
"""Mutate the provided field (probably a Dom or struct instance) using the
strategy specified with ``strat_name_or_class``, yielding ``num`` mutations
that affect up to ``at_once`` fields at once.
This function will yield back the field after each mutation, optionally
also yielding a ``set`` of fields that were mutated in that iteration (if ``yield_changed`` is
``True``). It should also be noted that the yielded set of changed fields *can*
be modified and is no longer needed by the mutate() function.
:param pfp.fields.Field field: The field to mutate (can be anything, not just Dom/Structs)
:param strat_name_or_class: Can be the name of a strategy, or the actual strategy class (not an instance)
:param int num: The number of mutations to yield
:param int at_once: The number of fields to mutate at once
:param bool yield_changed: Yield a list of fields changed along with the mutated dom
:returns: generator
"""
import pfp.fuzz.rand as rand
init()
strat = get_strategy(strat_name_or_cls)
to_mutate = strat.which(field)
with_strats = []
for to_mutate_field in to_mutate:
field_strat = strat.get_field_strat(to_mutate_field)
if field_strat is not None:
with_strats.append((to_mutate_field, field_strat))
# we don't need these ones anymore
del to_mutate
# save the current value of all subfields without
# triggering events
field._pfp__snapshot(recurse=True)
count = 0
for x in six.moves.range(num):
chosen_fields = set()
idx_pool = set([x for x in six.moves.xrange(len(with_strats))])
# modify `at_once` number of fields OR len(with_strats) number of fields,
# whichever is lower
for at_onces in six.moves.xrange(min(len(with_strats), at_once)):
# we'll never pull the same idx from idx_pool more than once
# since we're removing the idx after choosing it
rand_idx = rand.sample(idx_pool, 1)[0]
idx_pool.remove(rand_idx)
rand_field,field_strat = with_strats[rand_idx]
chosen_fields.add(rand_field)
field_strat.mutate(rand_field)
if yield_changed:
yield field, chosen_fields
else:
# yield back the original field
yield field
# restore the saved value of all subfields without
# triggering events
field._pfp__restore_snapshot(recurse=True)
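# Usage sketch (illustrative addition, not part of the original module): ``dom`` is
# assumed to come from pfp.parse(), and 'basic' stands for some registered strategy name.
#
#     for mutated, changed in mutate(dom, 'basic', num=100, at_once=2, yield_changed=True):
#         data = mutated._pfp__build()
#         # ...feed ``data`` to the program under test...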
|
[
"os.path.dirname",
"pfp.fuzz.rand.sample",
"six.moves.range",
"os.path.basename"
] |
[((2442, 2462), 'six.moves.range', 'six.moves.range', (['num'], {}), '(num)\n', (2457, 2462), False, 'import six\n'), ((623, 651), 'os.path.basename', 'os.path.basename', (['strat_file'], {}), '(strat_file)\n', (639, 651), False, 'import os\n'), ((567, 592), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (582, 592), False, 'import os\n'), ((2910, 2934), 'pfp.fuzz.rand.sample', 'rand.sample', (['idx_pool', '(1)'], {}), '(idx_pool, 1)\n', (2921, 2934), True, 'import pfp.fuzz.rand as rand\n')]
|
import torch
def is_pos_def(x):
if torch.equal(x, x.t()):
try:
torch.linalg.cholesky(x)
return True
except RuntimeError:
return False
else:
return False
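# Quick sanity check (illustrative addition, not part of the original module):
# the identity matrix is positive definite, while a symmetric matrix with a
# negative eigenvalue makes the Cholesky factorization raise, so it returns False.
if __name__ == "__main__":
    print(is_pos_def(torch.eye(3)))                          # True
    print(is_pos_def(torch.tensor([[1.0, 2.0], [2.0, 1.0]])))  # False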
|
[
"torch.linalg.cholesky"
] |
[((88, 112), 'torch.linalg.cholesky', 'torch.linalg.cholesky', (['x'], {}), '(x)\n', (109, 112), False, 'import torch\n')]
|
from rest_framework import serializers
from accounts.serializers import UserSerializer
from .models import Story
class StorySerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Story
fields = [
"id",
"title",
"slug",
"story_url",
"story_body_text",
"number_of_comments",
"number_of_votes",
"url_domain_name",
"rank",
"user",
]
read_only_fields = [
"number_of_comments",
"number_of_votes",
"url_domain_name",
"rank",
"slug",
]
def validate(self, data):
story_url = data.get("story_url", None)
story_body_text = data.get("story_body_text", None)
if story_url is None and story_body_text is None:
raise serializers.ValidationError(
"One of story_url or story_body_text is required."
)
return data
def create(self, validated_data):
user = self.context.get("user")
story = Story.objects.create(user=user, **validated_data)
return story
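# Usage sketch (illustrative addition, assumes a configured Django project): the
# serializer rejects payloads that carry neither a URL nor body text.
#
#     serializer = StorySerializer(data={"title": "Hello"}, context={"user": request.user})
#     serializer.is_valid()   # False; serializer.errors holds the non-field error
#
#     serializer = StorySerializer(
#         data={"title": "Hello", "story_url": "https://example.com"},
#         context={"user": request.user})
#     serializer.is_valid()   # True
#     story = serializer.save()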
|
[
"rest_framework.serializers.ValidationError",
"accounts.serializers.UserSerializer"
] |
[((178, 208), 'accounts.serializers.UserSerializer', 'UserSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (192, 208), False, 'from accounts.serializers import UserSerializer\n'), ((918, 997), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""One of story_url or story_body_text is required."""'], {}), "('One of story_url or story_body_text is required.')\n", (945, 997), False, 'from rest_framework import serializers\n')]
|
#!/usr/bin/env python
#
# Author: <NAME>.
# Created: Dec 11, 2014.
"""Some utility functions to handle images."""
import math
import numpy as np
import PIL.Image
from PIL.Image import ROTATE_180, ROTATE_90, ROTATE_270, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT
import skimage.transform
def imcast(img, dtype, color_space="default"):
"""Cast the input image to a given data type.
Parameters
----------
img: ndarray
The input image.
dtype: np.dtype
The type that output image to be cast into.
color_space: string, optional
The color space of the input image, which affects the casting operation.
Returns
-------
The output image that is cast into `dtype`.
Notes
-----
* For `color_space=="default"`, we perform a linear scaling with following
range conventions:
* `np.uint8`: `[0, 255]`;
* `np.uint16`: `[0, 65535]`;
* `np.float32` and `np.float64`: `[0.0, 1.0]`.
For example, if the input `img` is of `np.uint8` type and the expected
`dtype` is `np.float32`, then the output will be
`np.asarray(img / 255., np.float32)`.
* For `color_space=="CIE-L*a*b*"`, the "normal" value ranges are
`0 <= L <= 100, -127 <= a, b <= 127`, and we perform the following cast:
* `np.uint8`: `L <- L * 255 / 100, a <- a + 128, b <- b + 128`;
* `np.uint16`: currently not supported;
* `np.float32` and `np.float64`: left as is.
"""
if img.dtype == dtype:
return img
if color_space == "default":
if dtype == np.uint8:
if img.dtype == np.uint16:
return np.asarray(img / 257, np.uint8)
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img * 255., np.uint8)
elif dtype == np.uint16:
if img.dtype == np.uint8:
return np.asarray(img, np.uint16) * 257
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img * 65535., np.uint16)
elif dtype == np.float32 or dtype == np.float64:
if img.dtype == np.uint8:
return np.asarray(img, dtype) / 255.
elif img.dtype == np.uint16:
return np.asarray(img, dtype) / 65535.
elif img.dtype == np.float32 or img.dtype == np.float64:
return np.asarray(img, dtype)
elif color_space == "CIE-L*a*b*":
if dtype == np.uint8:
if img.dtype == np.float32 or img.dtype == np.float64:
dst = np.empty(img.shape, np.uint8)
dst[:,:,0] = img[:,:,0] * 255. / 100.
dst[:,:,1] = img[:,:,1] + 128.
dst[:,:,2] = img[:,:,2] + 128.
return dst
elif dtype == np.float32 or dtype == np.float64:
if img.dtype == np.uint8:
dst = np.empty(img.shape, dtype)
dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100.
dst[:,:,1] = np.asarray(img[:,:,1], dtype) - 128.
dst[:,:,2] = np.asarray(img[:,:,2], dtype) - 128.
return dst
raise Exception(
"Unexpected conversion from '%s' to '%s' with '%s' color space" % \
(img.dtype, dtype, color_space))
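# Worked example (illustrative addition): an 8-bit ramp maps linearly onto [0.0, 1.0].
#
#     >>> imcast(np.arange(0, 256, 51, dtype=np.uint8), np.float32)
#     array([0. , 0.2, 0.4, 0.6, 0.8, 1. ], dtype=float32)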
def imread(filename, dtype=np.uint8, color_space="default"):
"""Read the image followed by an :py:func:`imcast`."""
img = PIL.Image.open(filename)
if img.mode != "RGB":
img = img.convert("RGB")
if hasattr(img, "_getexif"):
try:
exif = img._getexif() or {}
except IOError:
exif = {}
orientation = exif.get(0x0112)
if orientation:
# see http://park2.wakwak.com/~tsuruzoh/Computer/Digicams/exif-e.html
# for explanation of the magical constants
# or see http://jpegclub.org/exif_orientation.html for a nice visual explanation
# also, rotations are counter-clockwise in PIL
orientation = int(orientation)
rotation = [None, None, ROTATE_180, None, ROTATE_270, ROTATE_270, ROTATE_90, ROTATE_90]
flip = [None, FLIP_LEFT_RIGHT, None, FLIP_TOP_BOTTOM, FLIP_LEFT_RIGHT, None,
FLIP_LEFT_RIGHT, None]
orientation0 = orientation - 1 # it's 1-indexed per the EXIF spec
if 0 <= orientation0 < len(rotation):
if rotation[orientation0] is not None:
img = img.transpose(rotation[orientation0])
if flip[orientation0] is not None:
img = img.transpose(flip[orientation0])
return imcast(np.array(img), dtype, color_space)
def imwrite(filename, img, dtype=np.uint8, color_space="default"):
"""Perform an :py:func:`imcast` before writing to the output file."""
import scipy.misc
return scipy.misc.imsave(filename, imcast(img, dtype, color_space))
def imresize(img, size):
"""Resize the input image.
Parameters
----------
img: ndarray
The input image to be resized.
size: a scalar for `scale` or a 2-tuple for `(num_rows, num_cols)`
One of the `num_rows` or `num_cols` can be -1, which will be inferred
such that the output image has the same aspect ratio as the input.
Returns
-------
The resized image.
"""
if hasattr(size, "__len__"):
num_rows, num_cols = size
assert (num_rows > 0) or (num_cols > 0)
        if num_rows < 0:
            num_rows = int(round(num_cols * img.shape[0] / img.shape[1]))
        if num_cols < 0:
            num_cols = int(round(num_rows * img.shape[1] / img.shape[0]))
else:
num_rows = int(round(img.shape[0] * size))
num_cols = int(round(img.shape[1] * size))
return skimage.transform.resize(img, (num_rows, num_cols))
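# Usage sketch (illustrative addition): halve an image, or fix the width at 640
# columns and let the number of rows follow the original aspect ratio.
#
#     half = imresize(img, 0.5)
#     wide = imresize(img, (-1, 640))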
def create_icon_mosaic(icons, icon_shape=None,
border_size=1, border_color=None, empty_color=None,
mosaic_shape=None, mosaic_dtype=np.float):
"""Create a mosaic of image icons.
Parameters
----------
icons: a list of `ndarray`s
A list of icons to be put together for mosaic. Currently we require all
icons to be multi-channel images of the same size.
icon_shape: 3-tuple, optional
The shape of icons in the output mosaic as `(num_rows, num_cols, num_channels)`.
If not specified, use the shape of first image in `icons`.
border_size: int, optional
The size of border.
border_color: 3-tuple, optional
The color of border, black if not specified.
empty_color: 3-tuple, optional
The color for empty cells, black if not specified.
mosaic_shape: 2-tuple, optional
The shape of output mosaic as `(num_icons_per_row,
num_icons_per_col)`. If not specified, try to make a square mosaic
according to number of icons.
mosaic_dtype: dtype
The data type of output mosaic.
Returns
-------
The created mosaic image.
"""
# Set default parameters.
num_icons = len(icons)
assert num_icons > 0
if icon_shape is None:
icon_shape = icons[0].shape
assert len(icon_shape) == 3
num_channels = icon_shape[2]
if border_color is None:
border_color = np.zeros(num_channels)
if empty_color is None:
empty_color = np.zeros(num_channels)
if mosaic_shape is None:
num_cols = int(math.ceil(math.sqrt(num_icons)))
num_rows = int(math.ceil(float(num_icons) / num_cols))
mosaic_shape = (num_rows, num_cols)
mosaic_image_shape = (
mosaic_shape[0] * icon_shape[0] + (mosaic_shape[0]-1) * border_size,
mosaic_shape[1] * icon_shape[1] + (mosaic_shape[1]-1) * border_size,
icon_shape[2])
# Create mosaic image and fill with border color.
mosaic_image = np.empty(mosaic_image_shape, dtype=mosaic_dtype)
    for c in range(mosaic_image.shape[2]):
        mosaic_image[:,:,c] = border_color[c]
    # Fill in the input icons.
    for idx in range(num_icons):
        i = idx // mosaic_shape[1]
        j = idx % mosaic_shape[1]
        iStart = i * (icon_shape[0] + border_size)
        jStart = j * (icon_shape[1] + border_size)
        mosaic_image[iStart:iStart+icon_shape[0],
                     jStart:jStart+icon_shape[1],:] = icons[idx]
    # Fill the empty icons with empty colors.
    for idx in range(num_icons, mosaic_shape[0]*mosaic_shape[1]):
        i = idx // mosaic_shape[1]
        j = idx % mosaic_shape[1]
        iStart = i * (icon_shape[0] + border_size)
        jStart = j * (icon_shape[1] + border_size)
        for c in range(mosaic_image.shape[2]):
            mosaic_image[iStart:iStart+icon_shape[0],
                         jStart:jStart+icon_shape[1],c] = empty_color[c]
return mosaic_image
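# Usage sketch (illustrative addition): tile sixteen random 32x32 RGB icons into a
# roughly square mosaic with a 2-pixel border between cells.
#
#     icons = [np.random.rand(32, 32, 3) for _ in range(16)]
#     mosaic = create_icon_mosaic(icons, border_size=2)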
def image_size_from_file(filename):
"""Read the image size from a file.
This function only loads but the image header (rather than the whole
rasterized data) in order to determine its dimension.
Parameters
----------
filename: string
The input image file.
Returns
-------
The 2-tuple for image size `(num_rows, num_cols)`.
"""
with PIL.Image.open(filename) as img:
width, height = img.size
return height, width
|
[
"math.sqrt",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.array"
] |
[((7824, 7872), 'numpy.empty', 'np.empty', (['mosaic_image_shape'], {'dtype': 'mosaic_dtype'}), '(mosaic_image_shape, dtype=mosaic_dtype)\n', (7832, 7872), True, 'import numpy as np\n'), ((4631, 4644), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4639, 4644), True, 'import numpy as np\n'), ((7259, 7281), 'numpy.zeros', 'np.zeros', (['num_channels'], {}), '(num_channels)\n', (7267, 7281), True, 'import numpy as np\n'), ((7332, 7354), 'numpy.zeros', 'np.zeros', (['num_channels'], {}), '(num_channels)\n', (7340, 7354), True, 'import numpy as np\n'), ((1634, 1665), 'numpy.asarray', 'np.asarray', (['(img / 257)', 'np.uint8'], {}), '(img / 257, np.uint8)\n', (1644, 1665), True, 'import numpy as np\n'), ((7417, 7437), 'math.sqrt', 'math.sqrt', (['num_icons'], {}), '(num_icons)\n', (7426, 7437), False, 'import math\n'), ((1758, 1791), 'numpy.asarray', 'np.asarray', (['(img * 255.0)', 'np.uint8'], {}), '(img * 255.0, np.uint8)\n', (1768, 1791), True, 'import numpy as np\n'), ((2562, 2591), 'numpy.empty', 'np.empty', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (2570, 2591), True, 'import numpy as np\n'), ((1885, 1911), 'numpy.asarray', 'np.asarray', (['img', 'np.uint16'], {}), '(img, np.uint16)\n', (1895, 1911), True, 'import numpy as np\n'), ((2010, 2046), 'numpy.asarray', 'np.asarray', (['(img * 65535.0)', 'np.uint16'], {}), '(img * 65535.0, np.uint16)\n', (2020, 2046), True, 'import numpy as np\n'), ((2884, 2910), 'numpy.empty', 'np.empty', (['img.shape', 'dtype'], {}), '(img.shape, dtype)\n', (2892, 2910), True, 'import numpy as np\n'), ((2164, 2186), 'numpy.asarray', 'np.asarray', (['img', 'dtype'], {}), '(img, dtype)\n', (2174, 2186), True, 'import numpy as np\n'), ((3013, 3044), 'numpy.asarray', 'np.asarray', (['img[:, :, 1]', 'dtype'], {}), '(img[:, :, 1], dtype)\n', (3023, 3044), True, 'import numpy as np\n'), ((3079, 3110), 'numpy.asarray', 'np.asarray', (['img[:, :, 2]', 'dtype'], {}), '(img[:, :, 2], dtype)\n', (3089, 3110), True, 'import numpy as np\n'), ((2258, 2280), 'numpy.asarray', 'np.asarray', (['img', 'dtype'], {}), '(img, dtype)\n', (2268, 2280), True, 'import numpy as np\n'), ((2382, 2404), 'numpy.asarray', 'np.asarray', (['img', 'dtype'], {}), '(img, dtype)\n', (2392, 2404), True, 'import numpy as np\n'), ((2940, 2971), 'numpy.asarray', 'np.asarray', (['img[:, :, 0]', 'dtype'], {}), '(img[:, :, 0], dtype)\n', (2950, 2971), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
import os
import sys
import numpy as np
from simulater import Simulater
from play_back import PlayBack, PlayBacks
COMMAND = ['UP', 'DOWN', 'LEFT', 'RIGHT']
def get_max_command(target_dict):
return max([(v,k) for k,v in target_dict.items()])[1]
def simplify(command):
return command[0]
def print_Q(Q, x, y):
ret = []
for i in range(y):
ret.append(['0' for _ in range(x)])
for k in Q:
ret[k[1]][k[0]] = simplify(get_max_command(Q[k]))
for this_line in ret:
print(''.join(this_line))
if __name__ == '__main__':
# parameters
file_name = 'default.txt'
epoch_num = 1000
max_trial = 5000
gamma = 0.1
alpha = 0.1
epsilon = 0.5
# make simulater
sim = Simulater(file_name)
# initialize Q value
x, y = sim.map_size()
Q = {}
for i in range(x):
for j in range(y):
Q[(i, j)] = {_:np.random.normal() for _ in COMMAND}
#Q[(i, j)] = {_:0.0 for _ in COMMAND}
# main
minimum_pbs = None
for epoch in range(epoch_num):
sim.reset()
this_pbs = PlayBacks()
for i in range(max_trial):
# get current
current_x, current_y = sim.get_current()
# select_command
tmp_Q = Q[(current_x, current_y)]
command = get_max_command(tmp_Q) if np.random.uniform() > epsilon else np.random.choice(COMMAND)
current_value = tmp_Q[command]
# reward
reward = sim(command)
# update
next_x, next_y = sim.get_current()
next_max_command = get_max_command(Q[(next_x, next_y)])
next_value = Q[(next_x, next_y)][next_max_command]
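            # Q-learning update: Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))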
tmp_Q[command] += alpha * (reward + gamma * next_value - current_value)
# play back
this_pbs.append(PlayBack((current_x, current_y),
command,
(next_x, next_y),
reward))
# end check
if sim.end_episode():
print('find goal')
epsilon *= 0.95
if epsilon < 0.05:
epsilon = 0.05
if minimum_pbs is None:
minimum_pbs = this_pbs
elif len(minimum_pbs) > len(this_pbs):
minimum_pbs = this_pbs
print(epsilon)
break
# update with minimum_pbs
if minimum_pbs is not None:
for pb in minimum_pbs:
tmp_Q = Q[pb.state]
current_value = tmp_Q[pb.action]
next_Q = Q[pb.next_state]
next_max_command = get_max_command(next_Q)
next_value = next_Q[next_max_command]
tmp_Q[pb.action] += alpha * (pb.reward + gamma * next_value - current_value)
sim.printing()
print('---')
print_Q(Q, x, y)
print('---')
|
[
"numpy.random.uniform",
"simulater.Simulater",
"play_back.PlayBacks",
"numpy.random.normal",
"numpy.random.choice",
"play_back.PlayBack"
] |
[((768, 788), 'simulater.Simulater', 'Simulater', (['file_name'], {}), '(file_name)\n', (777, 788), False, 'from simulater import Simulater\n'), ((1127, 1138), 'play_back.PlayBacks', 'PlayBacks', ([], {}), '()\n', (1136, 1138), False, 'from play_back import PlayBack, PlayBacks\n'), ((929, 947), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (945, 947), True, 'import numpy as np\n'), ((1412, 1437), 'numpy.random.choice', 'np.random.choice', (['COMMAND'], {}), '(COMMAND)\n', (1428, 1437), True, 'import numpy as np\n'), ((1898, 1965), 'play_back.PlayBack', 'PlayBack', (['(current_x, current_y)', 'command', '(next_x, next_y)', 'reward'], {}), '((current_x, current_y), command, (next_x, next_y), reward)\n', (1906, 1965), False, 'from play_back import PlayBack, PlayBacks\n'), ((1377, 1396), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1394, 1396), True, 'import numpy as np\n')]
|
from numpy.linalg import cholesky
import numpy as np
def senti2cate(x):
if x<=-0.6:
return 0
elif x>-0.6 and x<=-0.2:
return 1
elif x>-0.2 and x<0.2:
return 2
elif x>=0.2 and x<0.6:
return 3
elif x>=0.6:
return 4
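# DCG@k = sum_i (2**rel_i - 1) / log2(i + 1) over the top-k items ranked by
# predicted score; ndcg_score below divides by the DCG of the ideal ordering.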
def dcg_score(y_true, y_score, k=10):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gains = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
best = dcg_score(y_true, y_true, k)
actual = dcg_score(y_true, y_score, k)
return actual / best
def mrr_score(y_true, y_score):
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order)
rr_score = y_true / (np.arange(len(y_true)) + 1)
return np.sum(rr_score) / np.sum(y_true)
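# AUC computed directly as the probability that a randomly chosen positive
# outscores a randomly chosen negative, counting ties as one half.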
def auc(label,score):
label=np.array(label)
score=np.array(score)
false_score = score[label==0]
positive_score = score[label==1]
num_positive = (label==1).sum()
num_negative = (label==0).sum()
positive_score = positive_score.reshape((num_positive,1))
positive_score = np.repeat(positive_score,num_negative,axis=1)
false_score = false_score.reshape((1,num_negative))
false_score = np.repeat(false_score,num_positive,axis=0)
return 1-((positive_score<false_score).mean()+0.5*(positive_score==false_score).mean())
def embedding(embfile,word_dict):
emb_dict = {}
with open(embfile,'rb')as f:
while True:
line = f.readline()
if len(line) == 0:
break
data = line.split()
word = data[0].decode()
if len(word) != 0:
vec = [float(x) for x in data[1:]]
if word in word_dict:
emb_dict[word] = vec
emb_table = [0]*len(word_dict)
dummy = np.zeros(300,dtype='float32')
all_emb = []
for i in emb_dict:
emb_table[word_dict[i][0]] = np.array(emb_dict[i],dtype='float32')
all_emb.append(emb_table[word_dict[i][0]])
all_emb = np.array(all_emb,dtype='float32')
mu = np.mean(all_emb, axis=0)
Sigma = np.cov(all_emb.T)
norm = np.random.multivariate_normal(mu, Sigma, 1)
for i in range(len(emb_table)):
if type(emb_table[i]) == int:
emb_table[i] = np.reshape(norm, 300)
emb_table[0] = np.random.uniform(-0.03,0.03,size=(300,))
emb_table = np.array(emb_table,dtype='float32')
return emb_table
|
[
"numpy.random.uniform",
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"numpy.mean",
"numpy.array",
"numpy.take",
"numpy.random.multivariate_normal",
"numpy.reshape",
"numpy.cov",
"numpy.repeat"
] |
[((379, 405), 'numpy.take', 'np.take', (['y_true', 'order[:k]'], {}), '(y_true, order[:k])\n', (386, 405), True, 'import numpy as np\n'), ((497, 522), 'numpy.sum', 'np.sum', (['(gains / discounts)'], {}), '(gains / discounts)\n', (503, 522), True, 'import numpy as np\n'), ((757, 779), 'numpy.take', 'np.take', (['y_true', 'order'], {}), '(y_true, order)\n', (764, 779), True, 'import numpy as np\n'), ((911, 926), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (919, 926), True, 'import numpy as np\n'), ((937, 952), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (945, 952), True, 'import numpy as np\n'), ((1179, 1226), 'numpy.repeat', 'np.repeat', (['positive_score', 'num_negative'], {'axis': '(1)'}), '(positive_score, num_negative, axis=1)\n', (1188, 1226), True, 'import numpy as np\n'), ((1299, 1343), 'numpy.repeat', 'np.repeat', (['false_score', 'num_positive'], {'axis': '(0)'}), '(false_score, num_positive, axis=0)\n', (1308, 1343), True, 'import numpy as np\n'), ((1916, 1946), 'numpy.zeros', 'np.zeros', (['(300)'], {'dtype': '"""float32"""'}), "(300, dtype='float32')\n", (1924, 1946), True, 'import numpy as np\n'), ((2131, 2165), 'numpy.array', 'np.array', (['all_emb'], {'dtype': '"""float32"""'}), "(all_emb, dtype='float32')\n", (2139, 2165), True, 'import numpy as np\n'), ((2174, 2198), 'numpy.mean', 'np.mean', (['all_emb'], {'axis': '(0)'}), '(all_emb, axis=0)\n', (2181, 2198), True, 'import numpy as np\n'), ((2211, 2228), 'numpy.cov', 'np.cov', (['all_emb.T'], {}), '(all_emb.T)\n', (2217, 2228), True, 'import numpy as np\n'), ((2242, 2285), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'Sigma', '(1)'], {}), '(mu, Sigma, 1)\n', (2271, 2285), True, 'import numpy as np\n'), ((2428, 2471), 'numpy.random.uniform', 'np.random.uniform', (['(-0.03)', '(0.03)'], {'size': '(300,)'}), '(-0.03, 0.03, size=(300,))\n', (2445, 2471), True, 'import numpy as np\n'), ((2486, 2522), 'numpy.array', 'np.array', (['emb_table'], {'dtype': '"""float32"""'}), "(emb_table, dtype='float32')\n", (2494, 2522), True, 'import numpy as np\n'), ((340, 359), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (350, 359), True, 'import numpy as np\n'), ((718, 737), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (728, 737), True, 'import numpy as np\n'), ((844, 860), 'numpy.sum', 'np.sum', (['rr_score'], {}), '(rr_score)\n', (850, 860), True, 'import numpy as np\n'), ((863, 877), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (869, 877), True, 'import numpy as np\n'), ((2028, 2066), 'numpy.array', 'np.array', (['emb_dict[i]'], {'dtype': '"""float32"""'}), "(emb_dict[i], dtype='float32')\n", (2036, 2066), True, 'import numpy as np\n'), ((2387, 2408), 'numpy.reshape', 'np.reshape', (['norm', '(300)'], {}), '(norm, 300)\n', (2397, 2408), True, 'import numpy as np\n')]
|
# Databricks notebook source
# MAGIC %md
# MAGIC ### Ingest qualifying json files
# COMMAND ----------
dbutils.widgets.text("p_data_source", "")
v_data_source = dbutils.widgets.get("p_data_source")
# COMMAND ----------
dbutils.widgets.text("p_file_date", "2021-03-21")
v_file_date = dbutils.widgets.get("p_file_date")
# COMMAND ----------
# MAGIC %run "../includes/configuration"
# COMMAND ----------
# MAGIC %run "../includes/common_functions"
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 1 - Read the JSON file using the spark dataframe reader API
# COMMAND ----------
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
# COMMAND ----------
qualifying_schema = StructType(fields=[StructField("qualifyId", IntegerType(), False),
StructField("raceId", IntegerType(), True),
StructField("driverId", IntegerType(), True),
StructField("constructorId", IntegerType(), True),
StructField("number", IntegerType(), True),
StructField("position", IntegerType(), True),
StructField("q1", StringType(), True),
StructField("q2", StringType(), True),
StructField("q3", StringType(), True),
])
# COMMAND ----------
qualifying_df = spark.read \
.schema(qualifying_schema) \
.option("multiLine", True) \
.json(f"{raw_folder_path}/{v_file_date}/qualifying")
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 2 - Rename columns and add new columns
# MAGIC 1. Rename qualifyingId, driverId, constructorId and raceId
# MAGIC 1. Add ingestion_date with current timestamp
# COMMAND ----------
qualifying_with_ingestion_date_df = add_ingestion_date(qualifying_df)
# COMMAND ----------
from pyspark.sql.functions import lit, current_timestamp
# COMMAND ----------
final_df = qualifying_with_ingestion_date_df.withColumnRenamed("qualifyId", "qualify_id") \
.withColumnRenamed("driverId", "driver_id") \
.withColumnRenamed("raceId", "race_id") \
.withColumnRenamed("constructorId", "constructor_id") \
.withColumn("ingestion_date", current_timestamp()) \
.withColumn("data_source", lit(v_data_source)) \
.withColumn("file_date", lit(v_file_date))
# COMMAND ----------
# MAGIC %md
# MAGIC ##### Step 3 - Write to output to processed container in parquet format
# COMMAND ----------
#overwrite_partition(final_df, 'f1_processed', 'qualifying', 'race_id')
# COMMAND ----------
merge_condition = "tgt.qualify_id = src.qualify_id AND tgt.race_id = src.race_id"
merge_delta_data(final_df, 'f1_processed', 'qualifying', processed_folder_path, merge_condition, 'race_id')
# COMMAND ----------
dbutils.notebook.exit("Success")
# COMMAND ----------
|
[
"pyspark.sql.functions.lit",
"pyspark.sql.types.IntegerType",
"pyspark.sql.types.StringType"
] |
[((2387, 2403), 'pyspark.sql.functions.lit', 'lit', (['v_file_date'], {}), '(v_file_date)\n', (2390, 2403), False, 'from pyspark.sql.functions import lit\n'), ((2340, 2358), 'pyspark.sql.functions.lit', 'lit', (['v_data_source'], {}), '(v_data_source)\n', (2343, 2358), False, 'from pyspark.sql.functions import lit\n'), ((756, 769), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (767, 769), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((839, 852), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (850, 852), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((923, 936), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (934, 936), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((1012, 1025), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (1023, 1025), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((1094, 1107), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (1105, 1107), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((1178, 1191), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (1189, 1191), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((1256, 1268), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1266, 1268), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((1333, 1345), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1343, 1345), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n'), ((1410, 1422), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1420, 1422), False, 'from pyspark.sql.types import StructType, StructField, IntegerType, StringType\n')]
|
from __future__ import annotations
import typing
from typing_extensions import TypedDict
from ctc import evm
from ctc import rpc
from ctc import spec
old_pool_factory = '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae'
pool_factory = '0xb9fc157394af804a3578134a6585c0dc9cc990d4'
eth_address = '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee'
creation_blocks = {
'0x0959158b6040d32d04c301a72cbfd6b39e21c9ae': 11942404,
'0xb9fc157394af804a3578134a6585c0dc9cc990d4': 12903979,
}
#
# # call based
#
async def async_get_factory_pool_data(
factory: spec.Address,
include_balances: bool = False,
) -> list[CurvePoolData]:
import asyncio
n_pools = await rpc.async_eth_call(
to_address=factory,
function_name='pool_count',
)
coroutines = [
_async_get_pool_data(p, factory, include_balances=include_balances)
for p in range(n_pools)
]
return await asyncio.gather(*coroutines)
class CurvePoolData(TypedDict):
address: spec.Address
tokens: typing.Sequence[spec.Address]
symbols: typing.Sequence[str]
balances: typing.Sequence[int | float | None]
async def _async_get_pool_data(
p: int,
factory: spec.Address,
include_balances: bool = False,
) -> CurvePoolData:
pool = await rpc.async_eth_call(
to_address=factory,
function_name='pool_list',
function_parameters=[p],
)
coins = await rpc.async_eth_call(
to_address=factory,
function_name='get_coins',
function_parameters=[pool],
)
coins = [coin for coin in coins if coin not in [eth_address]]
valid_coins = [
coin
for coin in coins
if coin
not in ['0x0000000000000000000000000000000000000000', eth_address]
]
symbols = await evm.async_get_erc20s_symbols(
valid_coins,
)
if eth_address in coins:
index = coins.index(eth_address)
symbols.insert(index, 'ETH')
if include_balances:
balances: typing.MutableSequence[int | float | None] = (
await evm.async_get_erc20s_balance_of( # type: ignore
tokens=valid_coins,
address=pool,
)
)
if eth_address in coins:
eth_balance = await evm.async_get_eth_balance(pool)
balances.insert(index, eth_balance)
else:
balances = [None for coin in coins]
return {
'address': pool,
'tokens': coins,
'symbols': symbols,
'balances': balances,
}
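# Usage sketch (illustrative addition, assumes an RPC provider is configured for ctc):
#
#     import asyncio
#     pools = asyncio.run(async_get_factory_pool_data(pool_factory, include_balances=True))
#     for p in pools:
#         print(p['address'], p['symbols'], p['balances'])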
#
# # event based
#
async def async_get_base_pools(
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
verbose: bool = False,
) -> spec.DataFrame:
import asyncio
import pandas as pd
if start_block is None:
start_block = 12903979
# gather data
coroutines = []
for factory in [old_pool_factory, pool_factory]:
if start_block is None:
factory_start_block = creation_blocks[factory]
else:
factory_start_block = start_block
coroutine = evm.async_get_events(
contract_address=factory,
event_name='BasePoolAdded',
start_block=factory_start_block,
end_block=end_block,
provider=provider,
verbose=verbose,
)
coroutines.append(coroutine)
dfs = await asyncio.gather(*coroutines)
events = pd.concat(dfs)
# format data
events = events.sort_index()
events = events[['contract_address', 'transaction_hash', 'arg__base_pool']]
events = events.rename(
columns={
'contract_address': 'factory',
'arg__base_pool': 'pool',
}
)
return events
async def async_get_plain_pools(
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
verbose: bool = False,
) -> spec.DataFrame:
if start_block is None:
start_block = 12903979
events = await evm.async_get_events(
contract_address=pool_factory,
event_name='PlainPoolDeployed',
start_block=start_block,
end_block=end_block,
provider=provider,
verbose=verbose,
)
events = events[
[
'transaction_hash',
'contract_address',
'arg__coins',
'arg__A',
'arg__fee',
'arg__deployer',
]
]
events = events.rename(
columns={
'contract_address': 'factory',
'arg__coins': 'coins',
'arg__A': 'A',
'arg__fee': 'fee',
'arg__deployer': 'deployer',
}
)
return events
async def async_get_meta_pools(
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
verbose: bool = False,
) -> spec.DataFrame:
import asyncio
import pandas as pd
# gather data
coroutines = []
for factory in [old_pool_factory, pool_factory]:
if start_block is None:
factory_start_block: spec.BlockNumberReference = creation_blocks[
factory
]
else:
factory_start_block = start_block
coroutine = evm.async_get_events(
contract_address=factory,
event_name='MetaPoolDeployed',
start_block=factory_start_block,
end_block=end_block,
provider=provider,
verbose=verbose,
)
coroutines.append(coroutine)
dfs = await asyncio.gather(*coroutines)
events = pd.concat(dfs)
# format data
events = events.sort_index()
events = events[
[
'transaction_hash',
'contract_address',
'arg__coin',
'arg__base_pool',
'arg__A',
'arg__fee',
'arg__deployer',
]
]
events = events.rename(
columns={
'contract_address': 'factory',
'arg__coin': 'coin',
'arg__base_pool': 'base_pool',
'arg__A': 'A',
'arg__fee': 'fee',
'arg__deployer': 'deployer',
}
)
return events
|
[
"asyncio.gather",
"ctc.evm.async_get_erc20s_balance_of",
"ctc.evm.async_get_eth_balance",
"ctc.evm.async_get_events",
"pandas.concat",
"ctc.evm.async_get_erc20s_symbols",
"ctc.rpc.async_eth_call"
] |
[((3506, 3520), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (3515, 3520), True, 'import pandas as pd\n'), ((5805, 5819), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (5814, 5819), True, 'import pandas as pd\n'), ((669, 735), 'ctc.rpc.async_eth_call', 'rpc.async_eth_call', ([], {'to_address': 'factory', 'function_name': '"""pool_count"""'}), "(to_address=factory, function_name='pool_count')\n", (687, 735), False, 'from ctc import rpc\n'), ((911, 938), 'asyncio.gather', 'asyncio.gather', (['*coroutines'], {}), '(*coroutines)\n', (925, 938), False, 'import asyncio\n'), ((1271, 1365), 'ctc.rpc.async_eth_call', 'rpc.async_eth_call', ([], {'to_address': 'factory', 'function_name': '"""pool_list"""', 'function_parameters': '[p]'}), "(to_address=factory, function_name='pool_list',\n function_parameters=[p])\n", (1289, 1365), False, 'from ctc import rpc\n'), ((1412, 1509), 'ctc.rpc.async_eth_call', 'rpc.async_eth_call', ([], {'to_address': 'factory', 'function_name': '"""get_coins"""', 'function_parameters': '[pool]'}), "(to_address=factory, function_name='get_coins',\n function_parameters=[pool])\n", (1430, 1509), False, 'from ctc import rpc\n'), ((1780, 1821), 'ctc.evm.async_get_erc20s_symbols', 'evm.async_get_erc20s_symbols', (['valid_coins'], {}), '(valid_coins)\n', (1808, 1821), False, 'from ctc import evm\n'), ((3164, 3336), 'ctc.evm.async_get_events', 'evm.async_get_events', ([], {'contract_address': 'factory', 'event_name': '"""BasePoolAdded"""', 'start_block': 'factory_start_block', 'end_block': 'end_block', 'provider': 'provider', 'verbose': 'verbose'}), "(contract_address=factory, event_name='BasePoolAdded',\n start_block=factory_start_block, end_block=end_block, provider=provider,\n verbose=verbose)\n", (3184, 3336), False, 'from ctc import evm\n'), ((3465, 3492), 'asyncio.gather', 'asyncio.gather', (['*coroutines'], {}), '(*coroutines)\n', (3479, 3492), False, 'import asyncio\n'), ((4151, 4325), 'ctc.evm.async_get_events', 'evm.async_get_events', ([], {'contract_address': 'pool_factory', 'event_name': '"""PlainPoolDeployed"""', 'start_block': 'start_block', 'end_block': 'end_block', 'provider': 'provider', 'verbose': 'verbose'}), "(contract_address=pool_factory, event_name=\n 'PlainPoolDeployed', start_block=start_block, end_block=end_block,\n provider=provider, verbose=verbose)\n", (4171, 4325), False, 'from ctc import evm\n'), ((5460, 5637), 'ctc.evm.async_get_events', 'evm.async_get_events', ([], {'contract_address': 'factory', 'event_name': '"""MetaPoolDeployed"""', 'start_block': 'factory_start_block', 'end_block': 'end_block', 'provider': 'provider', 'verbose': 'verbose'}), "(contract_address=factory, event_name=\n 'MetaPoolDeployed', start_block=factory_start_block, end_block=\n end_block, provider=provider, verbose=verbose)\n", (5480, 5637), False, 'from ctc import evm\n'), ((5764, 5791), 'asyncio.gather', 'asyncio.gather', (['*coroutines'], {}), '(*coroutines)\n', (5778, 5791), False, 'import asyncio\n'), ((2054, 2119), 'ctc.evm.async_get_erc20s_balance_of', 'evm.async_get_erc20s_balance_of', ([], {'tokens': 'valid_coins', 'address': 'pool'}), '(tokens=valid_coins, address=pool)\n', (2085, 2119), False, 'from ctc import evm\n'), ((2258, 2289), 'ctc.evm.async_get_eth_balance', 'evm.async_get_eth_balance', (['pool'], {}), '(pool)\n', (2283, 2289), False, 'from ctc import evm\n')]
|
import numpy as np
import re
lineRegex = re.compile(r"(turn on|turn off|toggle) (\d+),(\d+) through (\d+),(\d+)")
def day6(fileName):
lights = np.zeros((1000, 1000), dtype=bool)
with open(fileName) as infile:
for line in infile:
match = lineRegex.match(line)
if match:
for x in range(int(match[2]), int(match[4]) + 1):
for y in range(int(match[3]), int(match[5]) + 1):
if match[1] == "turn on":
lights[y, x] = True
elif match[1] == "turn off":
lights[y, x] = False
elif match[1] == "toggle":
lights[y, x] = not lights[y, x]
else:
raise ValueError(f"Unknown directive: {match[1]}")
print(f"There are {lights.sum()} lights!")
def day6b(fileName):
lights = np.zeros((1000, 1000), dtype=int)
with open(fileName) as infile:
for line in infile:
match = lineRegex.match(line)
if match:
x1 = int(match[2])
x2 = int(match[4])
y1 = int(match[3])
y2 = int(match[5])
if match[1] == "turn on":
lights[y1:y2 + 1, x1:x2 + 1] += 1
elif match[1] == "turn off":
for x in range(x1, x2 + 1):
for y in range(y1, y2 + 1):
lights[y, x] = max(lights[y, x] - 1, 0)
elif match[1] == "toggle":
lights[y1:y2 + 1, x1:x2 + 1] += 2
else:
raise ValueError(f"Unknown directive: {match[1]}")
print(f"Brightness: {lights.sum()}")
#day6("6test.txt")
#day6("6.txt")
day6b("6btest.txt")
day6b("6.txt") #15343601
|
[
"numpy.zeros",
"re.compile"
] |
[((42, 117), 're.compile', 're.compile', (['"""(turn on|turn off|toggle) (\\\\d+),(\\\\d+) through (\\\\d+),(\\\\d+)"""'], {}), "('(turn on|turn off|toggle) (\\\\d+),(\\\\d+) through (\\\\d+),(\\\\d+)')\n", (52, 117), False, 'import re\n'), ((146, 180), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {'dtype': 'bool'}), '((1000, 1000), dtype=bool)\n', (154, 180), True, 'import numpy as np\n'), ((734, 767), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {'dtype': 'int'}), '((1000, 1000), dtype=int)\n', (742, 767), True, 'import numpy as np\n')]
|
import requests
import time
import gevent
import gevent.monkey
import re
import os
from threading import Thread
import schedule
from pyquery import PyQuery as pq
gevent.monkey.patch_socket()
url="http://tieba.baidu.com/mo/q---F55A5B1F58548A7A5403ABA7602FEBAE%3AFG%3D1--1-1-0--2--wapp_1510665393192_464/sign?tbs=af62312bf49309c61510669752&fid=152744&kw="
ba_cookie='put your cookie here'
headers={
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8',
'Cookie':ba_cookie,
'Host':'tieba.baidu.com',
'Proxy-Connection':'keep-alive',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
s=requests.Session()
def run(ba_url,ba_name):
qian_url=ba_url+ba_name
s.get(qian_url,headers=headers)
def go():
taske=[gevent.spawn(run,url,i) for i in ba_name_tuple]
gevent.joinall(taske)
rebuild=re.compile(r"已签到")
def check(ba_name):
content=s.get(url+ba_name,headers=headers).text
return_list=rebuild.findall(content)
if str(return_list)=="['已签到']":
pass
else:
print (ba_name+"-->Error")
def checkth():
for g in ba_name_tuple:
m=Thread(target=check,args=(g,))
m.start()
def writeconfig():
temp=pq(requests.get("http://wapp.baidu.com/",headers={'Cookie':ba_cookie}).content)
ba_all_url="http://"+str([i.attr("href") for i in temp(".my_love_bar").children().items()][-1])[2:]
retemp=re.compile(r">\w*</a>")
ba_name_list=[]
for i in retemp.findall(requests.get(ba_all_url,headers={'Cookie':ba_cookie}).text)[1:-2]:
ba_name_list.append(i[1:-4])
with open("qd_config.ini","w+",encoding="utf-8") as fob:
fob.write(str(tuple(ba_name_list)))
def checkconfig():
if "qd_config.ini" in os.listdir(os.getcwd()):
pass
else:
writeconfig()
def readconfig():
global ba_name_tuple
with open("qd_config.ini","r",encoding="utf-8") as fob:
ba_name_tuple=eval(fob.read())
def serun():
checkconfig()
readconfig()
go()
if __name__=="__main__":
    schedule.every().day.at("00:10").do(serun) # daily sign-in time
while 1:
schedule.run_pending()
time.sleep(1)
|
[
"schedule.run_pending",
"threading.Thread",
"re.compile",
"os.getcwd",
"requests.Session",
"time.sleep",
"gevent.monkey.patch_socket",
"requests.get",
"schedule.every",
"gevent.spawn",
"gevent.joinall"
] |
[((127, 155), 'gevent.monkey.patch_socket', 'gevent.monkey.patch_socket', ([], {}), '()\n', (153, 155), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((770, 788), 'requests.Session', 'requests.Session', ([], {}), '()\n', (786, 788), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((969, 986), 're.compile', 're.compile', (['"""已签到"""'], {}), "('已签到')\n", (979, 986), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((939, 960), 'gevent.joinall', 'gevent.joinall', (['taske'], {}), '(taske)\n', (953, 960), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((1472, 1495), 're.compile', 're.compile', (['""">\\\\w*</a>"""'], {}), "('>\\\\w*</a>')\n", (1482, 1495), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((890, 915), 'gevent.spawn', 'gevent.spawn', (['run', 'url', 'i'], {}), '(run, url, i)\n', (902, 915), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((1215, 1246), 'threading.Thread', 'Thread', ([], {'target': 'check', 'args': '(g,)'}), '(target=check, args=(g,))\n', (1221, 1246), False, 'from threading import Thread\n'), ((2097, 2119), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (2117, 2119), False, 'import schedule\n'), ((2122, 2135), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2132, 2135), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((1286, 1355), 'requests.get', 'requests.get', (['"""http://wapp.baidu.com/"""'], {'headers': "{'Cookie': ba_cookie}"}), "('http://wapp.baidu.com/', headers={'Cookie': ba_cookie})\n", (1298, 1355), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((1785, 1796), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1794, 1796), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((1538, 1593), 'requests.get', 'requests.get', (['ba_all_url'], {'headers': "{'Cookie': ba_cookie}"}), "(ba_all_url, headers={'Cookie': ba_cookie})\n", (1550, 1593), False, 'import requests, time, gevent, gevent.monkey, re, os\n'), ((2033, 2049), 'schedule.every', 'schedule.every', ([], {}), '()\n', (2047, 2049), False, 'import schedule\n')]
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import codecs
import errno
import re
import os
import os.path
import shutil
import ssl
import sys
import traceback
from itertools import product
import six
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import URLError
import multiprocessing.pool
try:
# Python 2 had these in the HTMLParser package.
from HTMLParser import HTMLParser, HTMLParseError
except ImportError:
# In Python 3, things moved to html.parser
from html.parser import HTMLParser
# Also, HTMLParseError is deprecated and never raised.
class HTMLParseError(Exception):
pass
from llnl.util.filesystem import mkdirp
import llnl.util.tty as tty
import spack.cmd
import spack.config
import spack.error
import spack.url
import spack.util.crypto
import spack.util.s3 as s3_util
import spack.util.url as url_util
from spack.util.compression import ALLOWED_ARCHIVE_TYPES
# Timeout in seconds for web requests
_timeout = 10
# See docstring for standardize_header_names()
_separators = ('', ' ', '_', '-')
HTTP_HEADER_NAME_ALIASES = {
"Accept-ranges": set(
''.join((A, 'ccept', sep, R, 'anges'))
for A, sep, R in product('Aa', _separators, 'Rr')),
"Content-length": set(
''.join((C, 'ontent', sep, L, 'ength'))
for C, sep, L in product('Cc', _separators, 'Ll')),
"Content-type": set(
''.join((C, 'ontent', sep, T, 'ype'))
for C, sep, T in product('Cc', _separators, 'Tt')),
"Date": set(('Date', 'date')),
"Last-modified": set(
''.join((L, 'ast', sep, M, 'odified'))
for L, sep, M in product('Ll', _separators, 'Mm')),
"Server": set(('Server', 'server'))
}
class LinkParser(HTMLParser):
"""This parser just takes an HTML page and strips out the hrefs on the
links. Good enough for a really simple spider. """
def __init__(self):
HTMLParser.__init__(self)
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr, val in attrs:
if attr == 'href':
self.links.append(val)
class NonDaemonProcess(multiprocessing.Process):
"""Process that allows sub-processes, so pools can have sub-pools."""
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
if sys.version_info[0] < 3:
class NonDaemonPool(multiprocessing.pool.Pool):
"""Pool that uses non-daemon processes"""
Process = NonDaemonProcess
else:
class NonDaemonContext(type(multiprocessing.get_context())):
Process = NonDaemonProcess
class NonDaemonPool(multiprocessing.pool.Pool):
"""Pool that uses non-daemon processes"""
def __init__(self, *args, **kwargs):
kwargs['context'] = NonDaemonContext()
super(NonDaemonPool, self).__init__(*args, **kwargs)
def uses_ssl(parsed_url):
if parsed_url.scheme == 'https':
return True
if parsed_url.scheme == 's3':
endpoint_url = os.environ.get('S3_ENDPOINT_URL')
if not endpoint_url:
return True
if url_util.parse(endpoint_url, scheme='https').scheme == 'https':
return True
return False
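# True on interpreters that predate default SSL certificate verification
# (Python < 2.7.9, or Python 3 earlier than 3.4.3).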
__UNABLE_TO_VERIFY_SSL = (
lambda pyver: (
(pyver < (2, 7, 9)) or
((3,) < pyver < (3, 4, 3))
))(sys.version_info)
def read_from_url(url, accept_content_type=None):
url = url_util.parse(url)
context = None
verify_ssl = spack.config.get('config:verify_ssl')
# Don't even bother with a context unless the URL scheme is one that uses
# SSL certs.
if uses_ssl(url):
if verify_ssl:
if __UNABLE_TO_VERIFY_SSL:
# User wants SSL verification, but it cannot be provided.
warn_no_ssl_cert_checking()
else:
# User wants SSL verification, and it *can* be provided.
context = ssl.create_default_context()
else:
# User has explicitly indicated that they do not want SSL
# verification.
context = ssl._create_unverified_context()
req = Request(url_util.format(url))
content_type = None
is_web_url = url.scheme in ('http', 'https')
if accept_content_type and is_web_url:
# Make a HEAD request first to check the content type. This lets
# us ignore tarballs and gigantic files.
# It would be nice to do this with the HTTP Accept header to avoid
# one round-trip. However, most servers seem to ignore the header
# if you ask for a tarball with Accept: text/html.
req.get_method = lambda: "HEAD"
resp = _urlopen(req, timeout=_timeout, context=context)
content_type = resp.headers.get('Content-type')
# Do the real GET request when we know it's just HTML.
req.get_method = lambda: "GET"
response = _urlopen(req, timeout=_timeout, context=context)
if accept_content_type and not is_web_url:
content_type = response.headers.get('Content-type')
reject_content_type = (
accept_content_type and (
content_type is None or
not content_type.startswith(accept_content_type)))
if reject_content_type:
tty.debug("ignoring page {0}{1}{2}".format(
url_util.format(url),
" with content type " if content_type is not None else "",
content_type or ""))
return None, None, None
return response.geturl(), response.headers, response
def warn_no_ssl_cert_checking():
tty.warn("Spack will not check SSL certificates. You need to update "
"your Python to enable certificate verification.")
def push_to_url(local_file_path, remote_path, **kwargs):
keep_original = kwargs.get('keep_original', True)
remote_url = url_util.parse(remote_path)
verify_ssl = spack.config.get('config:verify_ssl')
if __UNABLE_TO_VERIFY_SSL and verify_ssl and uses_ssl(remote_url):
warn_no_ssl_cert_checking()
remote_file_path = url_util.local_file_path(remote_url)
if remote_file_path is not None:
mkdirp(os.path.dirname(remote_file_path))
if keep_original:
shutil.copy(local_file_path, remote_file_path)
else:
try:
os.rename(local_file_path, remote_file_path)
except OSError as e:
if e.errno == errno.EXDEV:
# NOTE(opadron): The above move failed because it crosses
# filesystem boundaries. Copy the file (plus original
# metadata), and then delete the original. This operation
# needs to be done in separate steps.
shutil.copy2(local_file_path, remote_file_path)
os.remove(local_file_path)
elif remote_url.scheme == 's3':
extra_args = kwargs.get('extra_args', {})
remote_path = remote_url.path
while remote_path.startswith('/'):
remote_path = remote_path[1:]
s3 = s3_util.create_s3_session(remote_url)
s3.upload_file(local_file_path, remote_url.netloc,
remote_path, ExtraArgs=extra_args)
if not keep_original:
os.remove(local_file_path)
else:
raise NotImplementedError(
'Unrecognized URL scheme: {SCHEME}'.format(
SCHEME=remote_url.scheme))
def url_exists(url):
url = url_util.parse(url)
local_path = url_util.local_file_path(url)
if local_path:
return os.path.exists(local_path)
if url.scheme == 's3':
s3 = s3_util.create_s3_session(url)
from botocore.exceptions import ClientError
try:
s3.get_object(Bucket=url.netloc, Key=url.path)
return True
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
return False
raise err
# otherwise, just try to "read" from the URL, and assume that *any*
# non-throwing response contains the resource represented by the URL
try:
read_from_url(url)
return True
except URLError:
return False
def remove_url(url):
url = url_util.parse(url)
local_path = url_util.local_file_path(url)
if local_path:
os.remove(local_path)
return
if url.scheme == 's3':
s3 = s3_util.create_s3_session(url)
s3.delete_object(Bucket=url.s3_bucket, Key=url.path)
return
# Don't even try for other URL schemes.
def _list_s3_objects(client, url, num_entries, start_after=None):
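    # Fetch one page of object keys under url.path (relative names, '.' excluded)
    # and return it together with the last key for pagination, or None when done.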
list_args = dict(
Bucket=url.netloc,
Prefix=url.path,
MaxKeys=num_entries)
if start_after is not None:
list_args['StartAfter'] = start_after
result = client.list_objects_v2(**list_args)
last_key = None
if result['IsTruncated']:
last_key = result['Contents'][-1]['Key']
iter = (key for key in
(
os.path.relpath(entry['Key'], url.path)
for entry in result['Contents']
)
if key != '.')
return iter, last_key
def _iter_s3_prefix(client, url, num_entries=1024):
key = None
while True:
contents, key = _list_s3_objects(
client, url, num_entries, start_after=key)
for x in contents:
yield x
if not key:
break
def list_url(url):
url = url_util.parse(url)
local_path = url_util.local_file_path(url)
if local_path:
return os.listdir(local_path)
if url.scheme == 's3':
s3 = s3_util.create_s3_session(url)
return list(set(
key.split('/', 1)[0]
for key in _iter_s3_prefix(s3, url)))
def _spider(url, visited, root, depth, max_depth, raise_on_error):
"""Fetches URL and any pages it links to up to max_depth.
depth should initially be zero, and max_depth is the max depth of
links to follow from the root.
Prints out a warning only if the root can't be fetched; it ignores
errors with pages that the root links to.
Returns a tuple of:
- pages: dict of pages visited (URL) mapped to their full text.
- links: set of links encountered while visiting the pages.
"""
pages = {} # dict from page URL -> text content.
links = set() # set of all links seen on visited pages.
try:
response_url, _, response = read_from_url(url, 'text/html')
if not response_url or not response:
return pages, links
page = codecs.getreader('utf-8')(response).read()
pages[response_url] = page
# Parse out the links in the page
link_parser = LinkParser()
subcalls = []
link_parser.feed(page)
while link_parser.links:
raw_link = link_parser.links.pop()
abs_link = url_util.join(
response_url,
raw_link.strip(),
resolve_href=True)
links.add(abs_link)
# Skip stuff that looks like an archive
if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
continue
# Skip things outside the root directory
if not abs_link.startswith(root):
continue
# Skip already-visited links
if abs_link in visited:
continue
# If we're not at max depth, follow links.
if depth < max_depth:
subcalls.append((abs_link, visited, root,
depth + 1, max_depth, raise_on_error))
visited.add(abs_link)
if subcalls:
pool = NonDaemonPool(processes=len(subcalls))
try:
results = pool.map(_spider_wrapper, subcalls)
for sub_pages, sub_links in results:
pages.update(sub_pages)
links.update(sub_links)
finally:
pool.terminate()
pool.join()
except URLError as e:
tty.debug(e)
if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
tty.warn("Spack was unable to fetch url list due to a certificate "
"verification problem. You can try running spack -k, "
"which will not check SSL certificates. Use this at your "
"own risk.")
if raise_on_error:
raise NoNetworkConnectionError(str(e), url)
except HTMLParseError as e:
# This error indicates that Python's HTML parser sucks.
msg = "Got an error parsing HTML."
# Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
if sys.version_info[:3] < (2, 7, 3):
msg += " Use Python 2.7.3 or newer for better HTML parsing."
tty.warn(msg, url, "HTMLParseError: " + str(e))
except Exception as e:
# Other types of errors are completely ignored, except in debug mode.
tty.debug("Error in _spider: %s:%s" % (type(e), e),
traceback.format_exc())
return pages, links
def _spider_wrapper(args):
"""Wrapper for using spider with multiprocessing."""
return _spider(*args)
def _urlopen(req, *args, **kwargs):
"""Wrapper for compatibility with old versions of Python."""
url = req
try:
url = url.get_full_url()
except AttributeError:
pass
# We don't pass 'context' parameter because it was only introduced starting
# with versions 2.7.9 and 3.4.3 of Python.
if 'context' in kwargs:
del kwargs['context']
opener = urlopen
if url_util.parse(url).scheme == 's3':
import spack.s3_handler
opener = spack.s3_handler.open
return opener(req, *args, **kwargs)
def spider(root, depth=0):
"""Gets web pages from a root URL.
If depth is specified (e.g., depth=2), then this will also follow
up to <depth> levels of links from the root.
This will spawn processes to fetch the children, for much improved
performance over a sequential fetch.
"""
root = url_util.parse(root)
pages, links = _spider(root, set(), root, 0, depth, False)
return pages, links
def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
"""Scrape web pages for new versions of a tarball.
Arguments:
archive_urls (str or list or tuple): URL or sequence of URLs for
different versions of a package. Typically these are just the
tarballs from the package file itself. By default, this searches
the parent directories of archives.
Keyword Arguments:
list_url (str or None): URL for a listing of archives.
Spack will scrape these pages for download links that look
like the archive URL.
list_depth (int): Max depth to follow links on list_url pages.
Defaults to 0.
"""
if not isinstance(archive_urls, (list, tuple)):
archive_urls = [archive_urls]
# Generate a list of list_urls based on archive urls and any
# explicitly listed list_url in the package
list_urls = set()
if list_url is not None:
list_urls.add(list_url)
for aurl in archive_urls:
list_urls |= spack.url.find_list_urls(aurl)
# Add '/' to the end of the URL. Some web servers require this.
additional_list_urls = set()
for lurl in list_urls:
if not lurl.endswith('/'):
additional_list_urls.add(lurl + '/')
list_urls |= additional_list_urls
# Grab some web pages to scrape.
pages = {}
links = set()
for lurl in list_urls:
pg, lnk = spider(lurl, depth=list_depth)
pages.update(pg)
links.update(lnk)
# Scrape them for archive URLs
regexes = []
for aurl in archive_urls:
# This creates a regex from the URL with a capture group for
# the version part of the URL. The capture group is converted
# to a generic wildcard, so we can use this to extract things
# on a page that look like archive URLs.
url_regex = spack.url.wildcard_version(aurl)
# We'll be a bit more liberal and just look for the archive
# part, not the full path.
url_regex = os.path.basename(url_regex)
# We need to add a / to the beginning of the regex to prevent
# Spack from picking up similarly named packages like:
# https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
# https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
# https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
# https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
url_regex = '/' + url_regex
# We need to add a $ anchor to the end of the regex to prevent
# Spack from picking up signature files like:
# .asc
# .md5
# .sha256
# .sig
# However, SourceForge downloads still need to end in '/download'.
url_regex += r'(\/download)?$'
regexes.append(url_regex)
# Build a dict version -> URL from any links that match the wildcards.
# Walk through archive_url links first.
# Any conflicting versions will be overwritten by the list_url links.
versions = {}
for url in archive_urls + sorted(links):
if any(re.search(r, url) for r in regexes):
try:
ver = spack.url.parse_version(url)
versions[ver] = url
except spack.url.UndetectableVersionError:
continue
return versions
def standardize_header_names(headers):
"""Replace certain header names with standardized spellings.
Standardizes the spellings of the following header names:
- Accept-ranges
- Content-length
- Content-type
- Date
- Last-modified
- Server
Every name considered is translated to one of the above names if the only
difference between the two is how the first letters of each word are
capitalized; whether words are separated; or, if separated, whether they
are so by a dash (-), underscore (_), or space ( ). Header names that
cannot be mapped as described above are returned unaltered.
For example: The standard spelling of "Content-length" would be substituted
for any of the following names:
- Content-length
- content_length
- contentlength
- content_Length
- contentLength
- content Length
... and any other header name, such as "Content-encoding", would not be
altered, regardless of spelling.
If headers is a string, then it (or an appropriate substitute) is returned.
If headers is a non-empty tuple, headers[0] is a string, and there exists a
    standardized spelling for headers[0] that differs from it, then a new tuple
is returned. This tuple has the same elements as headers, except the first
element is the standardized spelling for headers[0].
If headers is a sequence, then a new list is considered, where each element
is its corresponding element in headers, but mapped as above if a string or
    tuple. This new list is returned if at least one of its elements differs
    from its corresponding element in headers.
If headers is a mapping, then a new dict is considered, where the key in
each item is the key of its corresponding item in headers, mapped as above
if a string or tuple. The value is taken from the corresponding item. If
the keys of multiple items in headers map to the same key after being
standardized, then the value for the resulting item is undefined. The new
dict is returned if at least one of its items has a key that differs from
that of their corresponding item in headers, or if the keys of multiple
items in headers map to the same key after being standardized.
In all other cases headers is returned unaltered.
"""
if isinstance(headers, six.string_types):
for standardized_spelling, other_spellings in (
HTTP_HEADER_NAME_ALIASES.items()):
if headers in other_spellings:
if headers == standardized_spelling:
return headers
return standardized_spelling
return headers
if isinstance(headers, tuple):
if not headers:
return headers
old = headers[0]
if isinstance(old, six.string_types):
new = standardize_header_names(old)
if old is not new:
return (new,) + headers[1:]
return headers
try:
changed = False
new_dict = {}
for key, value in headers.items():
if isinstance(key, (tuple, six.string_types)):
old_key, key = key, standardize_header_names(key)
changed = changed or key is not old_key
new_dict[key] = value
return new_dict if changed else headers
except (AttributeError, TypeError, ValueError):
pass
try:
changed = False
new_list = []
for item in headers:
if isinstance(item, (tuple, six.string_types)):
old_item, item = item, standardize_header_names(item)
changed = changed or item is not old_item
new_list.append(item)
return new_list if changed else headers
except TypeError:
pass
return headers
class SpackWebError(spack.error.SpackError):
"""Superclass for Spack web spidering errors."""
class NoNetworkConnectionError(SpackWebError):
"""Raised when an operation can't get an internet connection."""
def __init__(self, message, url):
super(NoNetworkConnectionError, self).__init__(
"No network connection: " + str(message),
"URL was: " + str(url))
self.url = url
|
[
"os.remove",
"llnl.util.tty.warn",
"spack.util.url.local_file_path",
"shutil.copy",
"os.path.dirname",
"os.path.exists",
"traceback.format_exc",
"itertools.product",
"ssl._create_unverified_context",
"html.parser.HTMLParser.__init__",
"re.search",
"os.path.basename",
"os.rename",
"shutil.copy2",
"spack.util.url.format",
"os.listdir",
"codecs.getreader",
"ssl.create_default_context",
"llnl.util.tty.debug",
"spack.util.s3.create_s3_session",
"os.environ.get",
"os.path.relpath",
"spack.util.url.parse"
] |
[((3683, 3702), 'spack.util.url.parse', 'url_util.parse', (['url'], {}), '(url)\n', (3697, 3702), True, 'import spack.util.url as url_util\n'), ((5817, 5944), 'llnl.util.tty.warn', 'tty.warn', (['"""Spack will not check SSL certificates. You need to update your Python to enable certificate verification."""'], {}), "(\n 'Spack will not check SSL certificates. You need to update your Python to enable certificate verification.'\n )\n", (5825, 5944), True, 'import llnl.util.tty as tty\n'), ((6082, 6109), 'spack.util.url.parse', 'url_util.parse', (['remote_path'], {}), '(remote_path)\n', (6096, 6109), True, 'import spack.util.url as url_util\n'), ((6297, 6333), 'spack.util.url.local_file_path', 'url_util.local_file_path', (['remote_url'], {}), '(remote_url)\n', (6321, 6333), True, 'import spack.util.url as url_util\n'), ((7707, 7726), 'spack.util.url.parse', 'url_util.parse', (['url'], {}), '(url)\n', (7721, 7726), True, 'import spack.util.url as url_util\n'), ((7744, 7773), 'spack.util.url.local_file_path', 'url_util.local_file_path', (['url'], {}), '(url)\n', (7768, 7773), True, 'import spack.util.url as url_util\n'), ((8479, 8498), 'spack.util.url.parse', 'url_util.parse', (['url'], {}), '(url)\n', (8493, 8498), True, 'import spack.util.url as url_util\n'), ((8517, 8546), 'spack.util.url.local_file_path', 'url_util.local_file_path', (['url'], {}), '(url)\n', (8541, 8546), True, 'import spack.util.url as url_util\n'), ((9718, 9737), 'spack.util.url.parse', 'url_util.parse', (['url'], {}), '(url)\n', (9732, 9737), True, 'import spack.util.url as url_util\n'), ((9756, 9785), 'spack.util.url.local_file_path', 'url_util.local_file_path', (['url'], {}), '(url)\n', (9780, 9785), True, 'import spack.util.url as url_util\n'), ((14445, 14465), 'spack.util.url.parse', 'url_util.parse', (['root'], {}), '(root)\n', (14459, 14465), True, 'import spack.util.url as url_util\n'), ((2119, 2144), 'html.parser.HTMLParser.__init__', 'HTMLParser.__init__', (['self'], {}), '(self)\n', (2138, 2144), False, 'from html.parser import HTMLParser\n'), ((3276, 3309), 'os.environ.get', 'os.environ.get', (['"""S3_ENDPOINT_URL"""'], {}), "('S3_ENDPOINT_URL')\n", (3290, 3309), False, 'import os\n'), ((4408, 4428), 'spack.util.url.format', 'url_util.format', (['url'], {}), '(url)\n', (4423, 4428), True, 'import spack.util.url as url_util\n'), ((7808, 7834), 'os.path.exists', 'os.path.exists', (['local_path'], {}), '(local_path)\n', (7822, 7834), False, 'import os\n'), ((7876, 7906), 'spack.util.s3.create_s3_session', 's3_util.create_s3_session', (['url'], {}), '(url)\n', (7901, 7906), True, 'import spack.util.s3 as s3_util\n'), ((8574, 8595), 'os.remove', 'os.remove', (['local_path'], {}), '(local_path)\n', (8583, 8595), False, 'import os\n'), ((8652, 8682), 'spack.util.s3.create_s3_session', 's3_util.create_s3_session', (['url'], {}), '(url)\n', (8677, 8682), True, 'import spack.util.s3 as s3_util\n'), ((9820, 9842), 'os.listdir', 'os.listdir', (['local_path'], {}), '(local_path)\n', (9830, 9842), False, 'import os\n'), ((9884, 9914), 'spack.util.s3.create_s3_session', 's3_util.create_s3_session', (['url'], {}), '(url)\n', (9909, 9914), True, 'import spack.util.s3 as s3_util\n'), ((16607, 16634), 'os.path.basename', 'os.path.basename', (['url_regex'], {}), '(url_regex)\n', (16623, 16634), False, 'import os\n'), ((4356, 4388), 'ssl._create_unverified_context', 'ssl._create_unverified_context', ([], {}), '()\n', (4386, 4388), False, 'import ssl\n'), ((6386, 6419), 'os.path.dirname', 'os.path.dirname', (['remote_file_path'], 
{}), '(remote_file_path)\n', (6401, 6419), False, 'import os\n'), ((6459, 6505), 'shutil.copy', 'shutil.copy', (['local_file_path', 'remote_file_path'], {}), '(local_file_path, remote_file_path)\n', (6470, 6505), False, 'import shutil\n'), ((7304, 7341), 'spack.util.s3.create_s3_session', 's3_util.create_s3_session', (['remote_url'], {}), '(remote_url)\n', (7329, 7341), True, 'import spack.util.s3 as s3_util\n'), ((12372, 12384), 'llnl.util.tty.debug', 'tty.debug', (['e'], {}), '(e)\n', (12381, 12384), True, 'import llnl.util.tty as tty\n'), ((13963, 13982), 'spack.util.url.parse', 'url_util.parse', (['url'], {}), '(url)\n', (13977, 13982), True, 'import spack.util.url as url_util\n'), ((1404, 1436), 'itertools.product', 'product', (['"""Aa"""', '_separators', '"""Rr"""'], {}), "('Aa', _separators, 'Rr')\n", (1411, 1436), False, 'from itertools import product\n'), ((1540, 1572), 'itertools.product', 'product', (['"""Cc"""', '_separators', '"""Ll"""'], {}), "('Cc', _separators, 'Ll')\n", (1547, 1572), False, 'from itertools import product\n'), ((1672, 1704), 'itertools.product', 'product', (['"""Cc"""', '_separators', '"""Tt"""'], {}), "('Cc', _separators, 'Tt')\n", (1679, 1704), False, 'from itertools import product\n'), ((1842, 1874), 'itertools.product', 'product', (['"""Ll"""', '_separators', '"""Mm"""'], {}), "('Ll', _separators, 'Mm')\n", (1849, 1874), False, 'from itertools import product\n'), ((3375, 3419), 'spack.util.url.parse', 'url_util.parse', (['endpoint_url'], {'scheme': '"""https"""'}), "(endpoint_url, scheme='https')\n", (3389, 3419), True, 'import spack.util.url as url_util\n'), ((4193, 4221), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (4219, 4221), False, 'import ssl\n'), ((5561, 5581), 'spack.util.url.format', 'url_util.format', (['url'], {}), '(url)\n', (5576, 5581), True, 'import spack.util.url as url_util\n'), ((6553, 6597), 'os.rename', 'os.rename', (['local_file_path', 'remote_file_path'], {}), '(local_file_path, remote_file_path)\n', (6562, 6597), False, 'import os\n'), ((7502, 7528), 'os.remove', 'os.remove', (['local_file_path'], {}), '(local_file_path)\n', (7511, 7528), False, 'import os\n'), ((9262, 9301), 'os.path.relpath', 'os.path.relpath', (["entry['Key']", 'url.path'], {}), "(entry['Key'], url.path)\n", (9277, 9301), False, 'import os\n'), ((12470, 12665), 'llnl.util.tty.warn', 'tty.warn', (['"""Spack was unable to fetch url list due to a certificate verification problem. You can try running spack -k, which will not check SSL certificates. Use this at your own risk."""'], {}), "(\n 'Spack was unable to fetch url list due to a certificate verification problem. You can try running spack -k, which will not check SSL certificates. Use this at your own risk.'\n )\n", (12478, 12665), True, 'import llnl.util.tty as tty\n'), ((13388, 13410), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13408, 13410), False, 'import traceback\n'), ((17703, 17720), 're.search', 're.search', (['r', 'url'], {}), '(r, url)\n', (17712, 17720), False, 'import re\n'), ((10853, 10878), 'codecs.getreader', 'codecs.getreader', (['"""utf-8"""'], {}), "('utf-8')\n", (10869, 10878), False, 'import codecs\n'), ((6984, 7031), 'shutil.copy2', 'shutil.copy2', (['local_file_path', 'remote_file_path'], {}), '(local_file_path, remote_file_path)\n', (6996, 7031), False, 'import shutil\n'), ((7052, 7078), 'os.remove', 'os.remove', (['local_file_path'], {}), '(local_file_path)\n', (7061, 7078), False, 'import os\n')]
|
from typing import Tuple
from Simulation import Simulation
from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI
from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, \
ColumnOutputSpecification, ProductSpecification, PerCompoundProperty
PATH = 'C:/Users/s2199718/Desktop/RL_PD/AspenSimulation/HydrocarbonMixture.bkp'
class AspenAPI(BaseAspenDistillationAPI):
def __init__(self):
self._flowsheet: Simulation = Simulation(PATH=PATH, VISIBILITY=False)
self._feed_name: str = "S1"
self._tops_name: str = "S2"
self._bottoms_name: str = "S3"
self._name_to_aspen_name = PerCompoundProperty(ethane="ETHANE",
propane="PROPANE", isobutane="I-BUTANE",
n_butane="N-BUTANE", isopentane="I-PENTAN", n_pentane="N-PENTAN")
def set_input_stream_specification(self, stream_specification: StreamSpecification) -> None:
"""Sets the input stream to a column to fit the stream specification"""
# Defining the Thermodynamic Properties
self._flowsheet.STRM_Temperature(self._feed_name, stream_specification.temperature)
self._flowsheet.STRM_Pressure(self._feed_name, stream_specification.pressure)
# Defining the Stream Composition for the Feed
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.ethane,
stream_specification.molar_flows.ethane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.propane,
stream_specification.molar_flows.propane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isobutane,
stream_specification.molar_flows.isobutane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_butane,
stream_specification.molar_flows.n_butane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.isopentane,
stream_specification.molar_flows.isopentane)
self._flowsheet.STRM_Flowrate(self._feed_name, self._name_to_aspen_name.n_pentane,
stream_specification.molar_flows.n_pentane)
def get_output_stream_specifications(self) -> Tuple[StreamSpecification, StreamSpecification]:
        # Getting the physical values of the top stream
tops_temperature = self._flowsheet.STRM_Get_Temperature(self._tops_name)
tops_pressure = self._flowsheet.STRM_Get_Pressure(self._tops_name)
        # Acquiring the outputs of the distillate (top stream)
tops_ethane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.ethane)
tops_propane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.propane)
tops_isobutane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isobutane)
tops_n_butane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_butane)
tops_isopentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.isopentane)
tops_n_pentane = self._flowsheet.STRM_Get_Outputs(self._tops_name, self._name_to_aspen_name.n_pentane)
# Passing all the variables to their respective "slot"
tops_specifications = StreamSpecification(temperature=tops_temperature, pressure=tops_pressure,
molar_flows=PerCompoundProperty(ethane=tops_ethane,
propane=tops_propane,
isobutane=tops_isobutane,
n_butane=tops_n_butane,
isopentane=tops_isopentane,
n_pentane=tops_n_pentane))
        # Getting the physical values of the bottom stream
bots_temperature = self._flowsheet.STRM_Get_Temperature(self._bottoms_name)
bots_pressure = self._flowsheet.STRM_Get_Pressure(self._bottoms_name)
        # Acquiring the outputs of the bottoms (bottom stream)
bots_ethane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.ethane)
bots_propane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.propane)
bots_isobutane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isobutane)
bots_n_butane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_butane)
bots_isopentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.isopentane)
bots_n_pentane = self._flowsheet.STRM_Get_Outputs(self._bottoms_name, self._name_to_aspen_name.n_pentane)
        # Tabulating the results of the bottom stream
bots_specifications = StreamSpecification(temperature=bots_temperature, pressure=bots_pressure,
molar_flows=PerCompoundProperty(ethane=bots_ethane,
propane=bots_propane,
isobutane=bots_isobutane,
n_butane=bots_n_butane,
isopentane=bots_isopentane,
n_pentane=bots_n_pentane))
return tops_specifications, bots_specifications
def get_simulated_column_properties(self) -> ColumnOutputSpecification:
# D_F_Location = self._flowsheet.BLK_Get_FeedLocation()
# D_Pressure = self._flowsheet.BLK_Get_Pressure()
# D_Reflux_Ratio = self._flowsheet.BLK_Get_RefluxRatio()
# D_Reboiler_Ratio = self._flowsheet.BLK_Get_ReboilerRatio()
D_Cond_Duty = self._flowsheet.BLK_Get_Condenser_Duty()
D_Reb_Duty = self._flowsheet.BLK_Get_Reboiler_Duty()
D_Col_Diameter = self._flowsheet.BLK_Get_Column_Diameter()
D_Specifications = ColumnOutputSpecification(condensor_duty=D_Cond_Duty, reboiler_duty=D_Reb_Duty,
diameter=D_Col_Diameter)
return D_Specifications
def set_column_specification(self, column_specification: ColumnInputSpecification) -> None:
self._flowsheet.BLK_NumberOfStages(column_specification.n_stages)
self._flowsheet.BLK_FeedLocation(column_specification.feed_stage_location)
self._flowsheet.BLK_Pressure(column_specification.condensor_pressure)
self._flowsheet.BLK_RefluxRatio(column_specification.reflux_ratio)
self._flowsheet.BLK_ReboilerRatio(column_specification.reboil_ratio)
def solve_flowsheet(self) -> bool:
self._flowsheet.Run()
def get_column_cost(self, column_specification: ColumnOutputSpecification) -> float:
"""Calculates the TAC of the column."""
raise NotImplementedError
def get_stream_value(self, stream_specification: StreamSpecification) -> float:
"""Calculates the value (per year) of a stream."""
raise NotImplementedError
def stream_is_product(self, stream_specification: StreamSpecification, product_specification:
ProductSpecification) -> bool:
"""Checks whether a stream meets the product specification."""
raise NotImplementedError
if __name__ == '__main__':
from hydrocarbon_problem.api.api_tests import test_api
aspen_api = AspenAPI()
test_api(aspen_api)
|
[
"Simulation.Simulation",
"hydrocarbon_problem.api.api_tests.test_api",
"hydrocarbon_problem.api.types.PerCompoundProperty",
"hydrocarbon_problem.api.types.ColumnOutputSpecification"
] |
[((8061, 8080), 'hydrocarbon_problem.api.api_tests.test_api', 'test_api', (['aspen_api'], {}), '(aspen_api)\n', (8069, 8080), False, 'from hydrocarbon_problem.api.api_tests import test_api\n'), ((481, 520), 'Simulation.Simulation', 'Simulation', ([], {'PATH': 'PATH', 'VISIBILITY': '(False)'}), '(PATH=PATH, VISIBILITY=False)\n', (491, 520), False, 'from Simulation import Simulation\n'), ((667, 820), 'hydrocarbon_problem.api.types.PerCompoundProperty', 'PerCompoundProperty', ([], {'ethane': '"""ETHANE"""', 'propane': '"""PROPANE"""', 'isobutane': '"""I-BUTANE"""', 'n_butane': '"""N-BUTANE"""', 'isopentane': '"""I-PENTAN"""', 'n_pentane': '"""N-PENTAN"""'}), "(ethane='ETHANE', propane='PROPANE', isobutane=\n 'I-BUTANE', n_butane='N-BUTANE', isopentane='I-PENTAN', n_pentane=\n 'N-PENTAN')\n", (686, 820), False, 'from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, ColumnOutputSpecification, ProductSpecification, PerCompoundProperty\n'), ((6608, 6717), 'hydrocarbon_problem.api.types.ColumnOutputSpecification', 'ColumnOutputSpecification', ([], {'condensor_duty': 'D_Cond_Duty', 'reboiler_duty': 'D_Reb_Duty', 'diameter': 'D_Col_Diameter'}), '(condensor_duty=D_Cond_Duty, reboiler_duty=\n D_Reb_Duty, diameter=D_Col_Diameter)\n', (6633, 6717), False, 'from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, ColumnOutputSpecification, ProductSpecification, PerCompoundProperty\n'), ((3669, 3843), 'hydrocarbon_problem.api.types.PerCompoundProperty', 'PerCompoundProperty', ([], {'ethane': 'tops_ethane', 'propane': 'tops_propane', 'isobutane': 'tops_isobutane', 'n_butane': 'tops_n_butane', 'isopentane': 'tops_isopentane', 'n_pentane': 'tops_n_pentane'}), '(ethane=tops_ethane, propane=tops_propane, isobutane=\n tops_isobutane, n_butane=tops_n_butane, isopentane=tops_isopentane,\n n_pentane=tops_n_pentane)\n', (3688, 3843), False, 'from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, ColumnOutputSpecification, ProductSpecification, PerCompoundProperty\n'), ((5422, 5596), 'hydrocarbon_problem.api.types.PerCompoundProperty', 'PerCompoundProperty', ([], {'ethane': 'bots_ethane', 'propane': 'bots_propane', 'isobutane': 'bots_isobutane', 'n_butane': 'bots_n_butane', 'isopentane': 'bots_isopentane', 'n_pentane': 'bots_n_pentane'}), '(ethane=bots_ethane, propane=bots_propane, isobutane=\n bots_isobutane, n_butane=bots_n_butane, isopentane=bots_isopentane,\n n_pentane=bots_n_pentane)\n', (5441, 5596), False, 'from hydrocarbon_problem.api.types import StreamSpecification, ColumnInputSpecification, ColumnOutputSpecification, ProductSpecification, PerCompoundProperty\n')]
|
import random
def ReLU(x, derivative=False):
""" ReLU function with corresponding derivative """
if derivative:
x[x <= 0] = 0
x[x > 0] = 1
return x
x[x < 0] = 0
return x
def ReLU_uniform_random():
""" Ideal weight starting values for ReLU """
return random.uniform(0.005, 0.2)
def uniform_random():
""" Generic uniform random from -n to n given output is multiplied by n """
return random.uniform(-1, 1)
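# Usage sketch (illustration only; assumes x is a NumPy array, which the
# boolean-mask indexing above requires):
#   import numpy as np
#   ReLU(np.array([-1.0, 2.0]))                   # -> array([0., 2.])
#   ReLU(np.array([-1.0, 2.0]), derivative=True)  # -> array([0., 1.])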
|
[
"random.uniform"
] |
[((301, 327), 'random.uniform', 'random.uniform', (['(0.005)', '(0.2)'], {}), '(0.005, 0.2)\n', (315, 327), False, 'import random\n'), ((442, 463), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (456, 463), False, 'import random\n')]
|
#!/bin/python
import unittest
import logging
import os
import sys
from src.message import message
class message_test(unittest.TestCase):
def test_from_message_record(self):
message_record = message(
msg_id=185,
channel_id=82,
source_id=50,
source_chat_id='111111',
msg='Hello world')
row = message_record.str().split(',')
row = [e.replace("'", "") if e.find("'") > -1 else int(e) for e in row]
message_record_from_row = message.from_message_record(row, False)
## Positive test
self.assertEqual(message_record.date, message_record_from_row.date)
self.assertEqual(message_record.time, message_record_from_row.time)
self.assertEqual(message_record.msg_id, message_record_from_row.msg_id)
self.assertEqual(message_record.channel_id, message_record_from_row.channel_id)
self.assertEqual(message_record.source_id, message_record_from_row.source_id)
self.assertEqual(message_record.source_chat_id, message_record_from_row.source_chat_id)
self.assertEqual(message_record.msg, message_record_from_row.msg)
## Negative test
message_record_from_row = message.from_message_record(None)
self.assertEqual(0, message_record_from_row.msg_id)
self.assertEqual(0, message_record_from_row.channel_id)
self.assertEqual(0, message_record_from_row.source_id)
self.assertEqual('', message_record_from_row.source_chat_id)
self.assertEqual('', message_record_from_row.msg)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"src.message.message",
"src.message.message.from_message_record"
] |
[((1720, 1735), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1733, 1735), False, 'import unittest\n'), ((204, 300), 'src.message.message', 'message', ([], {'msg_id': '(185)', 'channel_id': '(82)', 'source_id': '(50)', 'source_chat_id': '"""111111"""', 'msg': '"""Hello world"""'}), "(msg_id=185, channel_id=82, source_id=50, source_chat_id='111111',\n msg='Hello world')\n", (211, 300), False, 'from src.message import message\n'), ((598, 637), 'src.message.message.from_message_record', 'message.from_message_record', (['row', '(False)'], {}), '(row, False)\n', (625, 637), False, 'from src.message import message\n'), ((1316, 1349), 'src.message.message.from_message_record', 'message.from_message_record', (['None'], {}), '(None)\n', (1343, 1349), False, 'from src.message import message\n')]
|
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
from app.db.session import Base
from app.models import CharacterEpisode
class Episode(Base):
__tablename__ = "episode"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True)
air_date = Column(Date)
segment = Column(String, unique=True)
characters = relationship("CharacterEpisode", back_populates="episode")
comments = relationship("Comment", back_populates="episode")
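    # Proxy over the CharacterEpisode link rows: reading yields character_id values,
    # and appending an id builds the corresponding CharacterEpisode link object.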
association_ids = association_proxy(
"characters",
"character_id",
creator=lambda cid: CharacterEpisode(character_id=cid),
)
|
[
"sqlalchemy.orm.relationship",
"sqlalchemy.Column",
"app.models.CharacterEpisode"
] |
[((291, 336), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (297, 336), False, 'from sqlalchemy import Column, Integer, String, Date\n'), ((348, 375), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)'}), '(String, unique=True)\n', (354, 375), False, 'from sqlalchemy import Column, Integer, String, Date\n'), ((391, 403), 'sqlalchemy.Column', 'Column', (['Date'], {}), '(Date)\n', (397, 403), False, 'from sqlalchemy import Column, Integer, String, Date\n'), ((418, 445), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)'}), '(String, unique=True)\n', (424, 445), False, 'from sqlalchemy import Column, Integer, String, Date\n'), ((464, 522), 'sqlalchemy.orm.relationship', 'relationship', (['"""CharacterEpisode"""'], {'back_populates': '"""episode"""'}), "('CharacterEpisode', back_populates='episode')\n", (476, 522), False, 'from sqlalchemy.orm import relationship\n'), ((538, 587), 'sqlalchemy.orm.relationship', 'relationship', (['"""Comment"""'], {'back_populates': '"""episode"""'}), "('Comment', back_populates='episode')\n", (550, 587), False, 'from sqlalchemy.orm import relationship\n'), ((704, 738), 'app.models.CharacterEpisode', 'CharacterEpisode', ([], {'character_id': 'cid'}), '(character_id=cid)\n', (720, 738), False, 'from app.models import CharacterEpisode\n')]
|
import json
#hack for python2 support
try:
from .blkdiscoveryutil import *
except:
from blkdiscoveryutil import *
class LsBlk(BlkDiscoveryUtil):
def disks(self):
retval = []
parent = self.details()
for path, diskdetails in parent.items():
if not diskdetails.get('type') == "disk":
continue
retval.append(path)
return retval
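    # Recursively re-key each node's 'children' list into a dict keyed by device name.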
def label_children(self,retval):
if not retval.get('children'):
return
children = {}
for child in retval['children']:
self.label_children(child)
if child.get('name'):
name = child['name']
else:
name = "UNKNOWN"
children[name] = child
retval['children'] = children
def details(self):
retval = {}
rawoutput = self.subprocess_check_output(["lsblk", '--json', '-O', '-p'])
parent = json.loads(rawoutput)
for child in parent.get('blockdevices',[]):
#print child['id'] + child['class']
path = child.get('name')
retval[path] = child
for disk, details in retval.items():
self.label_children(details)
return self.stringify(retval)
if __name__ == '__main__':
import pprint
pp = pprint.PrettyPrinter(indent=4)
l = LsBlk()
devdata = l.details()
pp.pprint(devdata)
disks = l.disks()
pp.pprint(disks)
|
[
"pprint.PrettyPrinter",
"json.loads"
] |
[((1317, 1347), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (1337, 1347), False, 'import pprint\n'), ((946, 967), 'json.loads', 'json.loads', (['rawoutput'], {}), '(rawoutput)\n', (956, 967), False, 'import json\n')]
|