code (stringlengths 22 to 1.05M) | apis (listlengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
---|---|---|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import scipy.io
from scipy import optimize
from feature_func import *
from preprocess import *
from utils import *
def fit_data(gt_count, feature_data, function):
return optimize.curve_fit(function, feature_data, gt_count)
def plot_data(gt_count, feature_data, test_func=None):
plt.scatter(feature_data, gt_count, label='raw data')
if test_func is not None:
params, params_var = fit_data(gt_count, feature_data, test_func)
x_linspace = np.linspace(min(feature_data), max(feature_data), num=len(feature_data))
plt.plot(x_linspace, test_func(x_linspace, *params), label='Fitted quadratic polynomial')
def test_func(x, a2, a1, a0):
return a2 * np.power(x, 2) + a1 * np.power(x, 1) + a0
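# Minimal usage sketch for fit_data/test_func on synthetic data (illustrative
# only; the real feature_data and gt_count come from retrieve_data below):
#   feature = np.arange(10, dtype=float)
#   counts = 2.0 * feature ** 2 + 3.0 * feature + 1.0
#   params, params_cov = fit_data(counts, feature, test_func)
#   # params is approximately [2.0, 3.0, 1.0]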
def retrieve_data(image_root_path, mod=10):
# processing ucsd pedestrian dataset
sub_folder_index = 0
image_count = 0
images = []
gt_count_in_images = []
for sub_folder in image_root_path.glob('**/'):
print(sub_folder.name.split('.')[0].split('_')[-1])
if sub_folder_index == 0 or sub_folder.name.split('_')[0] != 'vidf1' or int(sub_folder.name.split('.')[0].split('_')[-1]) > 9:
sub_folder_index += 1
continue
print(sub_folder.name)
mat_path = annotation_root_path / (sub_folder.name.split('.')[0] + '_frame_full.mat')
mat = read_mat(mat_path)
for f in sub_folder.iterdir():
if not f.is_file():
continue
frame_index = int(f.name[-7:-4]) - 1
if image_count % mod == 0:
img = cv2.imread(str(f), 0)
images.append(img)
gt_count_in_images.append(mat['frame'][0][frame_index][0][0][0].shape[0])
image_count += 1
sub_folder_index += 1
return images, gt_count_in_images
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
background_image_path = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/background.png'
background_image = cv2.imread(background_image_path, 0)
image_root_dir = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf'
image_root_path = Path(image_root_dir)
annotation_root_dir = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf-cvpr'
annotation_root_path = Path(annotation_root_dir)
pmap = get_pmapxy('/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf-cvpr/vidf1_33_dmap3.mat')
images, gt_count_in_images = retrieve_data(image_root_path, mod=30)
print(len(images))
edited = get_abs_diff(images, background_image)
blurred = get_foreground_mask(edited, threshold=25)
seg_peri = get_seg_perimeter(blurred)
# perspective_seg_size = get_seg_size(edited, pmapxy=pmap)
plot_data(gt_count_in_images, seg_peri, test_func)
plt.legend(loc='best')
plt.title(label='segmentation perimeter against people count')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.power",
"scipy.optimize.curve_fit",
"cv2.imread",
"pathlib.Path"
] |
[((264, 316), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['function', 'feature_data', 'gt_count'], {}), '(function, feature_data, gt_count)\n', (282, 316), False, 'from scipy import optimize\n'), ((378, 431), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_data', 'gt_count'], {'label': '"""raw data"""'}), "(feature_data, gt_count, label='raw data')\n", (389, 431), True, 'import matplotlib.pyplot as plt\n'), ((2120, 2156), 'cv2.imread', 'cv2.imread', (['background_image_path', '(0)'], {}), '(background_image_path, 0)\n', (2130, 2156), False, 'import cv2\n'), ((2267, 2287), 'pathlib.Path', 'Path', (['image_root_dir'], {}), '(image_root_dir)\n', (2271, 2287), False, 'from pathlib import Path\n'), ((2413, 2438), 'pathlib.Path', 'Path', (['annotation_root_dir'], {}), '(annotation_root_dir)\n', (2417, 2438), False, 'from pathlib import Path\n'), ((2923, 2945), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2933, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2950, 3012), 'matplotlib.pyplot.title', 'plt.title', ([], {'label': '"""segmentation perimeter against people count"""'}), "(label='segmentation perimeter against people count')\n", (2959, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3025, 3027), True, 'import matplotlib.pyplot as plt\n'), ((771, 785), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (779, 785), True, 'import numpy as np\n'), ((793, 807), 'numpy.power', 'np.power', (['x', '(1)'], {}), '(x, 1)\n', (801, 807), True, 'import numpy as np\n')]
|
# Code for parallelization of the sensitivity analysis.
# This is accompanied by para.sh
import sys
import pandas as pd
from model import RegionModel
def run(i):
'''
performs a single simulation of a system
'''
m = RegionModel(int_trade, *df.iloc[i, 1:7])
for k in range(max_steps):
m.step()
# Get data
m.compute_statistics()
m.datacollector.collect(m)
outcomes = m.datacollector.get_model_vars_dataframe()
with open(r"data_int_on.csv","a") as f:
f.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(i, int_trade,*df.iloc[i, 1:7], *outcomes.iloc[0]))
max_steps = 1000
df = pd.read_csv('out.csv')
batches = int(sys.argv[1])
batch = int(sys.argv[2])
int_trade = True if int(sys.argv[3]) == 1 else False
runs_per_batch = int(len(df.index)/batches) + 1
# Clamp to the number of rows so the last batch does not index past the end of df.
for i in range(batch * runs_per_batch, min((batch + 1) * runs_per_batch, len(df.index))):
run(i)
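# Usage sketch (script/argument names are hypothetical, assumed to mirror para.sh):
#   python run_sensitivity.py <batches> <batch_index> <int_trade: 0 or 1>
# e.g. to run batch 3 of 16 with international trade enabled:
#   python run_sensitivity.py 16 3 1
# Each invocation appends its result rows to data_int_on.csv.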
|
[
"pandas.read_csv",
"model.RegionModel"
] |
[((664, 686), 'pandas.read_csv', 'pd.read_csv', (['"""out.csv"""'], {}), "('out.csv')\n", (675, 686), True, 'import pandas as pd\n'), ((233, 273), 'model.RegionModel', 'RegionModel', (['int_trade', '*df.iloc[i, 1:7]'], {}), '(int_trade, *df.iloc[i, 1:7])\n', (244, 273), False, 'from model import RegionModel\n')]
|
from scorebot.db import db_api, db_utils
from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers
team_triggers = {
db_api.PatchsetProposed.type: GroupOfTeamTriggers(
score_db_api=db_api.PatchsetProposed,
triggers=[
# week
TeamTrigger(
name="team_patchset_proposed_for_week1",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=50,
msg="{owner} making good progress this week, posted "
"their 50th patch-set",
),
TeamTrigger(
name="team_patchset_proposed_for_week2",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=100,
msg="{owner} is really active this week, they just posted "
"their 100th patch-set!",
),
# month
TeamTrigger(
name="team_patchset_proposed_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=200,
msg="{owner} made 200 patchsets this month! Cheers!",
),
TeamTrigger(
name="team_patchset_proposed_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=300,
msg="{owner} even reached 300 patchsets this month!",
),
]
),
db_api.PatchsetMerged.type: GroupOfTeamTriggers(
score_db_api=db_api.PatchsetMerged,
triggers=[
# today
TeamTrigger(
name="team_patchset_merged_for_today1",
get_date_scope=db_utils.DateScope.today,
trigger_point=5,
msg="{owner} just merged their 5th review today!"
),
# week
TeamTrigger(
name="team_patchset_merged_for_week1",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=10,
msg="{owner} is doing great! 10 reviews landed this week!"
),
# month
TeamTrigger(
name="team_patchset_merged_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=30,
msg="{owner} rocks! 30 reviews merged this month already!"
),
TeamTrigger(
name="team_patchset_merged_for_month2",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=50,
msg="{owner} keeps putting +2 randomly: 50 reviews merged "
"this month. And looks like the're not gonna stop!"
)
]
),
db_api.PatchsetReviewed.type: GroupOfTeamTriggers(
score_db_api=db_api.PatchsetReviewed,
triggers=[
# week
TeamTrigger(
name="team_patchset_reviewed_for_week1",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=50,
msg="{owner} is really keeps an eye on each other, made 50 "
"reviews this week already!"
),
# month
TeamTrigger(
name="team_patchset_reviewed_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=100,
msg="{owner} has great review activity this month, reached "
"100 reviews just now!"
)
]
)
}
|
[
"scorebot.triggers.triggers_base.TeamTrigger"
] |
[((288, 488), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_proposed_for_week1"""', 'get_date_scope': 'db_utils.DateScope.this_week', 'trigger_point': '(50)', 'msg': '"""{owner} making good progress this week, posted their 50th patch-set"""'}), "(name='team_patchset_proposed_for_week1', get_date_scope=\n db_utils.DateScope.this_week, trigger_point=50, msg=\n '{owner} making good progress this week, posted their 50th patch-set')\n", (299, 488), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((594, 808), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_proposed_for_week2"""', 'get_date_scope': 'db_utils.DateScope.this_week', 'trigger_point': '(100)', 'msg': '"""{owner} is really active this week, they just posted their 100th patch-set!"""'}), "(name='team_patchset_proposed_for_week2', get_date_scope=\n db_utils.DateScope.this_week, trigger_point=100, msg=\n '{owner} is really active this week, they just posted their 100th patch-set!'\n )\n", (605, 808), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((929, 1111), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_proposed_for_month1"""', 'get_date_scope': 'db_utils.DateScope.this_month', 'trigger_point': '(200)', 'msg': '"""{owner} made 200 patchsets this month! Cheers!"""'}), "(name='team_patchset_proposed_for_month1', get_date_scope=\n db_utils.DateScope.this_month, trigger_point=200, msg=\n '{owner} made 200 patchsets this month! Cheers!')\n", (940, 1111), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((1194, 1376), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_proposed_for_month1"""', 'get_date_scope': 'db_utils.DateScope.this_month', 'trigger_point': '(300)', 'msg': '"""{owner} even reached 300 patchsets this month!"""'}), "(name='team_patchset_proposed_for_month1', get_date_scope=\n db_utils.DateScope.this_month, trigger_point=300, msg=\n '{owner} even reached 300 patchsets this month!')\n", (1205, 1376), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((1613, 1783), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_merged_for_today1"""', 'get_date_scope': 'db_utils.DateScope.today', 'trigger_point': '(5)', 'msg': '"""{owner} just merged their 5th review today!"""'}), "(name='team_patchset_merged_for_today1', get_date_scope=db_utils\n .DateScope.today, trigger_point=5, msg=\n '{owner} just merged their 5th review today!')\n", (1624, 1783), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((1884, 2067), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_merged_for_week1"""', 'get_date_scope': 'db_utils.DateScope.this_week', 'trigger_point': '(10)', 'msg': '"""{owner} is doing great! 10 reviews landed this week!"""'}), "(name='team_patchset_merged_for_week1', get_date_scope=db_utils.\n DateScope.this_week, trigger_point=10, msg=\n '{owner} is doing great! 
10 reviews landed this week!')\n", (1895, 2067), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((2169, 2354), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_merged_for_month1"""', 'get_date_scope': 'db_utils.DateScope.this_month', 'trigger_point': '(30)', 'msg': '"""{owner} rocks! 30 reviews merged this month already!"""'}), "(name='team_patchset_merged_for_month1', get_date_scope=db_utils\n .DateScope.this_month, trigger_point=30, msg=\n '{owner} rocks! 30 reviews merged this month already!')\n", (2180, 2354), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((2436, 2676), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_merged_for_month2"""', 'get_date_scope': 'db_utils.DateScope.this_month', 'trigger_point': '(50)', 'msg': '"""{owner} keeps putting +2 randomly: 50 reviews merged this month. And looks like the\'re not gonna stop!"""'}), '(name=\'team_patchset_merged_for_month2\', get_date_scope=db_utils\n .DateScope.this_month, trigger_point=50, msg=\n "{owner} keeps putting +2 randomly: 50 reviews merged this month. And looks like the\'re not gonna stop!"\n )\n', (2447, 2676), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((2932, 3150), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_reviewed_for_week1"""', 'get_date_scope': 'db_utils.DateScope.this_week', 'trigger_point': '(50)', 'msg': '"""{owner} is really keeps an eye on each other, made 50 reviews this week already!"""'}), "(name='team_patchset_reviewed_for_week1', get_date_scope=\n db_utils.DateScope.this_week, trigger_point=50, msg=\n '{owner} is really keeps an eye on each other, made 50 reviews this week already!'\n )\n", (2943, 3150), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n'), ((3270, 3486), 'scorebot.triggers.triggers_base.TeamTrigger', 'TeamTrigger', ([], {'name': '"""team_patchset_reviewed_for_month1"""', 'get_date_scope': 'db_utils.DateScope.this_month', 'trigger_point': '(100)', 'msg': '"""{owner} has great review activity this month, reached 100 reviews just now!"""'}), "(name='team_patchset_reviewed_for_month1', get_date_scope=\n db_utils.DateScope.this_month, trigger_point=100, msg=\n '{owner} has great review activity this month, reached 100 reviews just now!'\n )\n", (3281, 3486), False, 'from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers\n')]
|
import convergence_experiment
def executables():
return convergence_experiment.executables("Data/hospital_section_player/",
2000)
|
[
"convergence_experiment.executables"
] |
[((60, 133), 'convergence_experiment.executables', 'convergence_experiment.executables', (['"""Data/hospital_section_player/"""', '(2000)'], {}), "('Data/hospital_section_player/', 2000)\n", (94, 133), False, 'import convergence_experiment\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from keras.models import Sequential, Model
from keras.models import model_from_yaml
import keras.backend as K
from keras.callbacks import Callback
from ..utility.utils import path
def custom_uniform(shape, range=(-1, 1), name=None):
"""
Example of custom function for keras.
"""
min_, max_ = range
return K.variable(
np.random.uniform(low=min_, high=max_, size=shape), name=name)
# Example usage:
# net.add(Dense(10, input_dim=5, init=lambda shape,
# name: custom_uniform(shape, (-10, 5), name)))
class TestCallback(Callback):
"""
Example callback class for keras.
"""
def __init__(self, generator):
self.data_generator = generator
def on_epoch_end(self, epoch, logs={}):
x, y = next(self.data_generator)
loss, acc = self.model.evaluate(x, y, verbose=0)
print('\nTesting loss: {}, acc: {}\n'.format(loss, acc))
class Network(object):
"""
Base class for the various neural networks.
"""
def __init__(self):
self.metrics = ()
self.model = Sequential()
def first_layer_output(self, x):
weights = self.get_layer_weights(1)
W = weights[0]
b = weights[1]
return np.dot(x, W) + b
def predict_on_batch(self, x):
return self.model.predict_on_batch(x)
def get_weights(self, layer=None):
if layer is None:
return self.model.get_weights()
return self.model.layers[layer].get_weights()
def weight_shapes(self):
return self.get_weights()[0].shape, self.get_weights()[1].shape
def set_layer_weights(self, layer, weights):
self.model.layers[layer].set_weights(
[weights, self.get_weights(layer)[1]])
def set_layer_bias(self, layer, bias):
self.model.layers[layer].set_weights(
[self.get_weights(layer)[0], bias])
def set_layer_parameters(self, layer, weights, bias):
self.model.layers[layer].set_weights([weights, bias])
def get_layer_weights(self, layer):
return self.model.get_layer(index=layer).get_weights()
def train_once(self, data, batch_size):
self.model.fit(data[0], data[1], epochs=1, batch_size=batch_size)
def train_on_generator(self, training_set_generator, batches_per_epoch,
epochs, verbose):
h = self.model.fit_generator(
training_set_generator, batches_per_epoch, epochs, verbose=verbose)
loss = h.history['loss'][epochs - 1]
acc = h.history['categorical_accuracy'][epochs - 1]
self.metrics = '{0:.3g}'.format(loss), '{0:.3g}'.format(acc)
def save(self, relative_path, filename=None):
if filename is None:
filename = 'model'
absolute_path = ''.join([path(), relative_path, filename])
network_out = ''.join([absolute_path, '.yaml'])
weight_out = ''.join([absolute_path, '.h5'])
model_yaml = self.model.to_yaml()
with open(network_out, 'w') as yaml_file:
yaml_file.write(model_yaml)
self.model.save_weights(weight_out)
def load(self, relative_path, filename):
absolute_path = ''.join([path(), relative_path, filename])
network = ''.join([absolute_path, '.yaml'])
weights = ''.join([absolute_path, '.h5'])
with open(network, 'r') as yaml_file:
loaded_model_yaml = yaml_file.read()
self.model = model_from_yaml(loaded_model_yaml)
self.model.load_weights(weights)
|
[
"numpy.dot",
"numpy.random.uniform",
"keras.models.model_from_yaml",
"keras.models.Sequential"
] |
[((415, 465), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'min_', 'high': 'max_', 'size': 'shape'}), '(low=min_, high=max_, size=shape)\n', (432, 465), True, 'import numpy as np\n'), ((1139, 1151), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1149, 1151), False, 'from keras.models import Sequential, Model\n'), ((3502, 3536), 'keras.models.model_from_yaml', 'model_from_yaml', (['loaded_model_yaml'], {}), '(loaded_model_yaml)\n', (3517, 3536), False, 'from keras.models import model_from_yaml\n'), ((1296, 1308), 'numpy.dot', 'np.dot', (['x', 'W'], {}), '(x, W)\n', (1302, 1308), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# (C) 2017 <NAME>
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
from opentamiltests import *
import tamil.utf8 as utf8
from tamil.tscii import TSCII
import codecs
if PYTHON3:
class long(int):
pass
class Letters(unittest.TestCase):
def test_uyir_mei_split(self):
ak = utf8.splitMeiUyir(u"ஃ")
self.assertEqual(ak,u"ஃ")
il = utf8.splitMeiUyir(u"ல்")
self.assertEqual(il,u"ல்")
il,ee = utf8.splitMeiUyir(u"லி")
self.assertEqual((il,ee),(u"ல்",u"இ"))
def test_classifier(self):
expected = []
expected.extend(['english']*3)
expected.extend(['digit']*4)
expected.extend(['kuril','nedil','uyirmei','vallinam','uyirmei'])
data = list(map(utf8.classify_letter,utf8.get_letters(u"abc1230அஆரெட்டை")))
self.assertEqual(data,expected)
def demo(self):
for l in utf8.get_letters_iterable(u"இதுதாண்டாபோலிசு"):
print("%s - %s"%(l,utf8.classify_letter(l)))
def test_classified_except(self):
with self.assertRaises(ValueError) as ve:
utf8.classify_letter(u'.')
if __name__ == '__main__':
unittest.main()
|
[
"tamil.utf8.get_letters",
"tamil.utf8.classify_letter",
"tamil.utf8.get_letters_iterable",
"tamil.utf8.splitMeiUyir"
] |
[((344, 367), 'tamil.utf8.splitMeiUyir', 'utf8.splitMeiUyir', (['u"""ஃ"""'], {}), "(u'ஃ')\n", (361, 367), True, 'import tamil.utf8 as utf8\n'), ((415, 439), 'tamil.utf8.splitMeiUyir', 'utf8.splitMeiUyir', (['u"""ல்"""'], {}), "(u'ல்')\n", (432, 439), True, 'import tamil.utf8 as utf8\n'), ((491, 515), 'tamil.utf8.splitMeiUyir', 'utf8.splitMeiUyir', (['u"""லி"""'], {}), "(u'லி')\n", (508, 515), True, 'import tamil.utf8 as utf8\n'), ((929, 974), 'tamil.utf8.get_letters_iterable', 'utf8.get_letters_iterable', (['u"""இதுதாண்டாபோலிசு"""'], {}), "(u'இதுதாண்டாபோலிசு')\n", (954, 974), True, 'import tamil.utf8 as utf8\n'), ((1142, 1168), 'tamil.utf8.classify_letter', 'utf8.classify_letter', (['u"""."""'], {}), "(u'.')\n", (1162, 1168), True, 'import tamil.utf8 as utf8\n'), ((812, 848), 'tamil.utf8.get_letters', 'utf8.get_letters', (['u"""abc1230அஆரெட்டை"""'], {}), "(u'abc1230அஆரெட்டை')\n", (828, 848), True, 'import tamil.utf8 as utf8\n'), ((1007, 1030), 'tamil.utf8.classify_letter', 'utf8.classify_letter', (['l'], {}), '(l)\n', (1027, 1030), True, 'import tamil.utf8 as utf8\n')]
|
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
"""
Implementation of an EnumEditor demo for Traits UI
This demo shows each of the four styles of the EnumEditor
Fixme: This only shows the capabilities of the old-style EnumEditor
"""
# Imports:
from enthought.traits.api \
import HasTraits, Enum
from enthought.traits.ui.api \
import Item, Group, View
# Define the demo class:
class EnumEditorDemo ( HasTraits ):
""" Defines the main EnumEditor demo class. """
# Define an Enum trait to view:
name_list = Enum( 'A-495', 'A-498', 'R-1226', 'TS-17', 'TS-18' )
# Items are used to define the display, one Item per editor style:
enum_group = Group(
Item( 'name_list', style = 'simple', label = 'Simple' ),
Item( '_' ),
Item( 'name_list', style = 'custom', label = 'Custom' ),
Item( '_' ),
Item( 'name_list', style = 'text', label = 'Text' ),
Item( '_' ),
Item( 'name_list', style = 'readonly', label = 'ReadOnly' )
)
# Demo view:
view = View(
enum_group,
title = 'EnumEditor',
buttons = ['OK'],
resizable = True
)
# Create the demo:
demo = EnumEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
|
[
"enthought.traits.ui.api.Item",
"enthought.traits.ui.api.View",
"enthought.traits.api.Enum"
] |
[((577, 627), 'enthought.traits.api.Enum', 'Enum', (['"""A-495"""', '"""A-498"""', '"""R-1226"""', '"""TS-17"""', '"""TS-18"""'], {}), "('A-495', 'A-498', 'R-1226', 'TS-17', 'TS-18')\n", (581, 627), False, 'from enthought.traits.api import HasTraits, Enum\n'), ((1108, 1176), 'enthought.traits.ui.api.View', 'View', (['enum_group'], {'title': '"""EnumEditor"""', 'buttons': "['OK']", 'resizable': '(True)'}), "(enum_group, title='EnumEditor', buttons=['OK'], resizable=True)\n", (1112, 1176), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((738, 787), 'enthought.traits.ui.api.Item', 'Item', (['"""name_list"""'], {'style': '"""simple"""', 'label': '"""Simple"""'}), "('name_list', style='simple', label='Simple')\n", (742, 787), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((807, 816), 'enthought.traits.ui.api.Item', 'Item', (['"""_"""'], {}), "('_')\n", (811, 816), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((829, 878), 'enthought.traits.ui.api.Item', 'Item', (['"""name_list"""'], {'style': '"""custom"""', 'label': '"""Custom"""'}), "('name_list', style='custom', label='Custom')\n", (833, 878), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((898, 907), 'enthought.traits.ui.api.Item', 'Item', (['"""_"""'], {}), "('_')\n", (902, 907), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((920, 965), 'enthought.traits.ui.api.Item', 'Item', (['"""name_list"""'], {'style': '"""text"""', 'label': '"""Text"""'}), "('name_list', style='text', label='Text')\n", (924, 965), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((987, 996), 'enthought.traits.ui.api.Item', 'Item', (['"""_"""'], {}), "('_')\n", (991, 996), False, 'from enthought.traits.ui.api import Item, Group, View\n'), ((1009, 1062), 'enthought.traits.ui.api.Item', 'Item', (['"""name_list"""'], {'style': '"""readonly"""', 'label': '"""ReadOnly"""'}), "('name_list', style='readonly', label='ReadOnly')\n", (1013, 1062), False, 'from enthought.traits.ui.api import Item, Group, View\n')]
|
import numpy as np
import sys
sys.path.insert(0, '..')
from src.utils import *
class LinearRegression:
def __init__(self):
self.params = None
def train(self, X, y, iterations=5000, learning_rate=0.01, display=False):
'''
Input parameters:
X: (mxn) array where m is the number of training examples and n is the number of features
y: (mx1) array with target values
'''
# We initialize parameters as an (nx1) array of zeros
self.params = np.zeros((X.shape[1], 1))
loss_hist = np.zeros((1,0))
for i in range(iterations):
y_hat = X.dot(self.params)
loss = MeanSquaredError.loss(y, y_hat)
loss_hist = np.append(loss_hist, loss)
self.params = BatchGradientDescent.optimize(
X, y, y_hat, self.params, learning_rate, MeanSquaredError)
if display:
show_progress(i, iterations, loss)
if display:
print('\n')
return loss_hist, loss
def predict(self, X, y):
y_hat = X.dot(self.params)
loss = MeanSquaredError.loss(y, y_hat)
return y_hat, loss
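# Minimal usage sketch on synthetic data (illustrative only; assumes src.utils
# provides MeanSquaredError, BatchGradientDescent and show_progress as used above):
#   X = np.hstack([np.ones((100, 1)), np.random.rand(100, 1)])  # bias column + one feature
#   y = X.dot(np.array([[2.0], [3.0]])) + 0.1 * np.random.randn(100, 1)
#   model = LinearRegression()
#   loss_hist, final_loss = model.train(X, y, iterations=1000, learning_rate=0.1)
#   y_hat, mse = model.predict(X, y)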
|
[
"numpy.append",
"numpy.zeros",
"sys.path.insert"
] |
[((31, 55), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (46, 55), False, 'import sys\n'), ((511, 536), 'numpy.zeros', 'np.zeros', (['(X.shape[1], 1)'], {}), '((X.shape[1], 1))\n', (519, 536), True, 'import numpy as np\n'), ((558, 574), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (566, 574), True, 'import numpy as np\n'), ((725, 751), 'numpy.append', 'np.append', (['loss_hist', 'loss'], {}), '(loss_hist, loss)\n', (734, 751), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Class that implements drawing of rectangles (used for bars and lines).
It uses pygame.draw.
Methods:
- draw
- update
- resize
"""
import pygame
from .math_tools import PositionMathTool as pmt
from .math_tools import PositionValueObject as pval
class BarNLine:
def __init__(self):
self.input_remaning = 2
def draw(self, positions):
self.dct["from"] = pval(positions[0])
self.dct["to"] = pval(positions[1])
f = pval(positions[0])
t = pval(positions[1])
p = pval((0, 0))
of = pval((1, 1)) * self.dct[self.thickness_name]
f = f + p
t = t + p
p = pmt.min(f, t)
f = f - p + of
t = t - p + of
p = p - of
self._pos = p
self.dct["from"] = f
self.dct["to"] = t
def update(self):
h = self.dct[self.thickness_name] / 2
f = pval(self.dct["from"])
t = pval(self.dct["to"])
p = self._pos
of = pval((1, 1)) * self.dct[self.thickness_name] * 1
f = f + p
t = t + p
g = self.grid_step
f = pmt.discretize(f, g)
t = pmt.discretize(t, g)
p = pmt.min(f, t)
f = f - p + of
t = t - p + of
self._pos = p - of
self.dct["from"] = f
self.dct["to"] = t
dif = t - f
norm = dif.norm()
if norm != 0:
ratio = h / norm
else:
ratio = 1
w_p = ratio * dif ^ (0, 0, 1)
w_m = ratio * dif ^ (0, 0, -1)
c0 = f + w_m
c1 = f + w_p
c2 = t + w_p
c3 = t + w_m
corners = [c0, c1, c2, c3]
poly = [c.get() for c in corners]
rect = pmt.max(corners).get()
self.surface = pygame.Surface(rect, pygame.SRCALPHA)
self.surface.fill(pygame.Color("#77777720"))
self.shape = pygame.draw.polygon(self.surface, self.dct[self.color_name], poly)
self.mask = pygame.mask.from_surface(self.surface)
def resize(self, new_mouse_pos):
if self.dct["from"] != new_mouse_pos - self._pos:
self.dct["to"] = new_mouse_pos - self._pos
|
[
"pygame.mask.from_surface",
"pygame.draw.polygon",
"pygame.Color",
"pygame.Surface"
] |
[((1796, 1833), 'pygame.Surface', 'pygame.Surface', (['rect', 'pygame.SRCALPHA'], {}), '(rect, pygame.SRCALPHA)\n', (1810, 1833), False, 'import pygame\n'), ((1908, 1974), 'pygame.draw.polygon', 'pygame.draw.polygon', (['self.surface', 'self.dct[self.color_name]', 'poly'], {}), '(self.surface, self.dct[self.color_name], poly)\n', (1927, 1974), False, 'import pygame\n'), ((1995, 2033), 'pygame.mask.from_surface', 'pygame.mask.from_surface', (['self.surface'], {}), '(self.surface)\n', (2019, 2033), False, 'import pygame\n'), ((1860, 1885), 'pygame.Color', 'pygame.Color', (['"""#77777720"""'], {}), "('#77777720')\n", (1872, 1885), False, 'import pygame\n')]
|
import warnings
from django.template.loader import render_to_string
from django_cradmin import crsettings
def join_css_classes_list(css_classes_list):
"""
Join the provided list of css classes into a string.
"""
return ' '.join(css_classes_list)
class AbstractRenderable(object):
"""
An abstract class that implements an interface for
rendering something.
Everything is just helpers for the :meth:`.render` method,
which renders a template with an object of this class as
input.
"""
#: The default value for :meth:`.get_template_names`.
template_name = None
def get_template_names(self):
"""
Get the template name(s) for :meth:`.render`.
Defaults to :obj:`~.AbstractRenderable.template_name`.
Raises:
NotImplementedError: If :obj:`~.AbstractRenderable.template_name` is
not set.
"""
if self.template_name:
return self.template_name
else:
raise NotImplementedError('You must set template_name or override '
'get_template_names().')
def get_context_data(self, request=None):
"""
Get context data for :meth:`.render`.
Defaults to::
{
'me': self
}
"""
return {
'me': self
}
def render(self, request=None, extra_context_data=None):
"""
Render :obj:`.get_template_names` with
the context returned by :meth:`.get_context_data`.
Parameters:
request (HttpRequest): If this is provided, we forward it to
:meth:`.get_context_data`, and to ``render_to_string()``
(which is used to render the template).
"""
context_data = {}
if extra_context_data:
context_data.update(extra_context_data)
context_data.update(self.get_context_data(request=request))
return render_to_string(
template_name=self.get_template_names(),
context=context_data,
request=request)
class AbstractRenderableWithCss(AbstractRenderable):
"""
Extends :class:`.AbstractRenderable` with a unified
API for setting CSS classes.
"""
def get_base_css_classes_list(self):
return []
def get_extra_css_classes_list(self):
return []
def get_css_classes_list(self):
"""
Override this to define css classes for the component.
Must return a list of css classes.
See :meth:`.get_css_classes_string`.
"""
css_classes_list = []
# if hasattr(self, 'get_base_css_classes_list'):
# warnings.warn("AbstractRenderableWithCss.get_base_css_classes_list() is deprectated "
# "- override get_css_classes_list() instead.",
# DeprecationWarning)
# css_classes_list.extend(self.get_base_css_classes_list())
# if hasattr(self, 'get_extra_css_classes_list'):
# warnings.warn("AbstractRenderableWithCss.get_extra_css_classes_list() is deprectated "
# "- override get_css_classes_list() instead.",
# DeprecationWarning)
# css_classes_list.extend(self.get_extra_css_classes_list())
css_classes_list.extend(self.get_base_css_classes_list())
css_classes_list.extend(self.get_extra_css_classes_list())
return css_classes_list
def get_test_css_class_suffixes_list(self):
"""
List of css class suffixes to include when running automatic tests.
These suffixes are filtered through the
:func:`~django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class`
template tag.
"""
return []
@property
def css_classes(self):
"""
Get css classes.
Joins :meth:`.get_css_classes_list` into a string.
You should not override this, override :meth:`.get_css_classes_list` instead.
"""
from django_cradmin.templatetags import cradmin_tags # Avoid circular import
css_classes = list(self.get_css_classes_list())
if crsettings.get_setting('DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES', False):
for css_class_suffix in self.get_test_css_class_suffixes_list():
css_classes.append(cradmin_tags.cradmin_test_css_class(css_class_suffix))
return join_css_classes_list(css_classes)
def get_css_classes_string(self):
warnings.warn("AbstractRenderableWithCss.get_css_classes_string() is deprectated "
"- use the AbstractRenderableWithCss.css_classes property instead.",
DeprecationWarning)
return self.css_classes
class AbstractBemRenderable(AbstractRenderable):
"""
Base class for renderables that uses BEM (http://getbem.com/)
for their CSS class naming.
This is an alternative to :class:`.AbstractRenderableWithCss`
that makes it much more natural to work with BEM.
"""
def __init__(self, bem_block=None, bem_element=None, bem_variant_list=None,
extra_css_classes_list=None):
"""
Args:
bem_block (str): The BEM block. Cannot be supplied if ``bem_element`` is supplied.
bem_element (str): The BEM element. Cannot be supplied if ``bem_block`` is supplied.
bem_variant_list (list): A list of BEM variants for the block/element.
You do not include the block/element, just the part after ``--``.
extra_css_classes_list (list): List of extra css classes.
"""
if bem_block and bem_element:
raise ValueError('Can not specify both bem_block and bem_element arguments.')
if bem_element and '__' not in bem_element:
raise ValueError('bem_element must contain __')
self._bem_block = bem_block
self._bem_element = bem_element
self._bem_variant_list = bem_variant_list or []
self._extra_css_classes_list = extra_css_classes_list or []
def get_test_css_class_suffixes_list(self):
"""
List of css class suffixes to include when running automatic tests.
These suffixes are filtered through the
:func:`~django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class`
template tag.
"""
return []
@property
def bem_block_or_element(self):
"""
Returns :meth:`.get_bem_block` falling back to :meth:`.get_bem_element`.
"""
return self.get_bem_block() or self.get_bem_element()
def get_bem_block(self):
"""
Get the bem block string.
"""
return self._bem_block
def get_bem_element(self):
"""
Get the bem element string.
"""
return self._bem_element
def get_bem_variant_list(self):
"""
Get a list of BEM variants.
You do not include the block/element, just the part after ``--``.
"""
return self._bem_variant_list
def get_extra_css_classes_list(self):
"""
Get a list of extra css classes.
"""
return self._extra_css_classes_list
@property
def css_classes(self):
"""
Get css classes as a string.
You should not override this, override :meth:`.get_bem_block` / :meth:`.get_bem_element`
and :meth:`.get_bem_variant_list` instead.
"""
from django_cradmin.templatetags import cradmin_tags # Avoid circular import
css_classes = []
if self.bem_block_or_element:
css_classes = [self.bem_block_or_element]
css_classes.extend(['{}--{}'.format(self.bem_block_or_element, variant)
for variant in self.get_bem_variant_list()])
if crsettings.get_setting('DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES', False):
for css_class_suffix in self.get_test_css_class_suffixes_list():
css_classes.append(cradmin_tags.cradmin_test_css_class(css_class_suffix))
css_classes.extend(self.get_extra_css_classes_list())
return join_css_classes_list(css_classes)
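# Illustrative sketch of a concrete renderable built on the BEM helpers above
# (the class name and template path are hypothetical, not part of django_cradmin):
#
#   class CardRenderable(AbstractBemRenderable):
#       template_name = 'myapp/card.django.html'
#
#   CardRenderable(bem_block='card', bem_variant_list=['compact']).css_classes
#   # -> 'card card--compact'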
|
[
"warnings.warn",
"django_cradmin.crsettings.get_setting",
"django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class"
] |
[((4234, 4306), 'django_cradmin.crsettings.get_setting', 'crsettings.get_setting', (['"""DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES"""', '(False)'], {}), "('DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES', False)\n", (4256, 4306), False, 'from django_cradmin import crsettings\n'), ((4572, 4750), 'warnings.warn', 'warnings.warn', (['"""AbstractRenderableWithCss.get_css_classes_string() is deprectated - use the AbstractRenderableWithCss.css_classes property instead."""', 'DeprecationWarning'], {}), "(\n 'AbstractRenderableWithCss.get_css_classes_string() is deprectated - use the AbstractRenderableWithCss.css_classes property instead.'\n , DeprecationWarning)\n", (4585, 4750), False, 'import warnings\n'), ((7923, 7995), 'django_cradmin.crsettings.get_setting', 'crsettings.get_setting', (['"""DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES"""', '(False)'], {}), "('DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES', False)\n", (7945, 7995), False, 'from django_cradmin import crsettings\n'), ((4420, 4473), 'django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class', 'cradmin_tags.cradmin_test_css_class', (['css_class_suffix'], {}), '(css_class_suffix)\n', (4455, 4473), False, 'from django_cradmin.templatetags import cradmin_tags\n'), ((8109, 8162), 'django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class', 'cradmin_tags.cradmin_test_css_class', (['css_class_suffix'], {}), '(css_class_suffix)\n', (8144, 8162), False, 'from django_cradmin.templatetags import cradmin_tags\n')]
|
s = 'Hello Python'
# s.reverse()
print(s[::-1])
print(''.join(reversed(s)))
def reverse_i(s):
r = ''
for c in s:
r = c + r
return r
print(reverse_i(s))
def reverse_r(s):
if len(s) <= 1:
return s
else:
return reverse_r(s[1:]) + s[0]
print(reverse_r(s))
r = list(s)
r.reverse()
print(''.join(r))
def reverse_li(s):
r = []
for c in s:
r.insert(0, c)
return ''.join(r)
print(reverse_li(s))
print(''.join([s[i] for i in range(len(s)-1, -1, -1)]))
print(''.join(s[i] for i in range(len(s)-1, -1, -1)))
def reverse_g(s):
def sub(s):
for i in range(len(s)-1, -1, -1):
yield s[i]
return ''.join(sub(s))
print(reverse_g(s))
from collections import deque
r = deque(s)
r.reverse()
print(''.join(r))
|
[
"collections.deque"
] |
[((812, 820), 'collections.deque', 'deque', (['s'], {}), '(s)\n', (817, 820), False, 'from collections import deque\n')]
|
import os
from IGitt.GitLab import GitLabOAuthToken
from IGitt.GitLab.GitLab import GitLab
from IGitt.GitLab.GitLabComment import GitLabComment
from IGitt.GitLab.GitLabCommit import GitLabCommit
from IGitt.GitLab.GitLabIssue import GitLabIssue
from IGitt.GitLab.GitLabMergeRequest import GitLabMergeRequest
from IGitt.Interfaces import AccessLevel
from IGitt.Interfaces.Actions import IssueActions, MergeRequestActions, \
PipelineActions
from tests import IGittTestCase
class GitLabHosterTest(IGittTestCase):
def setUp(self):
self.gl = GitLab(GitLabOAuthToken(os.environ.get('GITLAB_TEST_TOKEN', '')))
def test_repo_permissions_inheritance(self):
repos = [
{
'namespace':{'id': 1, 'parent_id': None},
'permissions': {'group_access': {'access_level': 40},
'project_access': None}
},
{
'namespace': {'id': 2, 'parent_id': 1},
'permissions': {'group_access': None, 'project_access': None}
},
{
'namespace': {'id': 3, 'parent_id': 2},
'permissions': {'group_access': None, 'project_access': None}
},
{
'namespace': {'id': 4, 'parent_id': None},
'permissions': {'group_access': None,
'project_access': {'access_level': 40}}
}
]
self.assertEqual(set(map(lambda x: x['namespace']['id'],
GitLab._get_repos_with_permissions(
repos, AccessLevel.ADMIN))),
{1, 2, 3, 4})
def test_master_repositories(self):
self.assertEqual(sorted(map(lambda x: x.full_name, self.gl.master_repositories)),
['gitmate-test-user/test'])
def test_owned_repositories(self):
self.assertEqual(sorted(map(lambda x: x.full_name, self.gl.owned_repositories)),
['gitmate-test-user/test'])
def test_write_repositories(self):
self.assertEqual(sorted(map(lambda x: x.full_name, self.gl.write_repositories)),
['gitmate-test-user/test'])
def test_get_repo(self):
self.assertEqual(self.gl.get_repo('gitmate-test-user/test').full_name,
'gitmate-test-user/test')
class GitLabWebhookTest(IGittTestCase):
def setUp(self):
self.gl = GitLab(GitLabOAuthToken(
os.environ.get('GITLAB_TEST_TOKEN', '')))
self.repo_name = 'test/test'
self.default_data = {
'project': {
'path_with_namespace': self.repo_name,
},
'object_attributes': {
'id': 12,
'iid': 23,
'action': 'open',
'noteable_type': 'Issue',
'target': {
'path_with_namespace': 'gitmate-test-user/test'
}
},
'commit': {
'id': 'bcb<PASSWORD>',
},
'merge_request': {
'iid': 123,
},
'issue': {
'iid': 123,
'action': 'open',
},
'repository': {
'git_ssh_url': '<EMAIL>:gitmate-test-user/test.git'
}
}
def test_unknown_event(self):
with self.assertRaises(NotImplementedError):
list(self.gl.handle_webhook('unknown_event', self.default_data))
def test_issue_hook(self):
for event, obj in self.gl.handle_webhook('Issue Hook',
self.default_data):
self.assertEqual(event, IssueActions.OPENED)
self.assertIsInstance(obj[0], GitLabIssue)
def test_pr_hook(self):
for event, obj in self.gl.handle_webhook('Merge Request Hook',
self.default_data):
self.assertEqual(event, MergeRequestActions.OPENED)
self.assertIsInstance(obj[0], GitLabMergeRequest)
def test_pr_synchronized(self):
data = self.default_data
data['object_attributes']['oldrev'] = 'deadbeef'
for event, obj in self.gl.handle_webhook('Merge Request Hook',
self.default_data):
self.assertEqual(event, MergeRequestActions.SYNCHRONIZED)
self.assertIsInstance(obj[0], GitLabMergeRequest)
def test_issue_comment(self):
for event, obj in self.gl.handle_webhook('Note Hook',
self.default_data):
self.assertEqual(event, IssueActions.COMMENTED)
self.assertIsInstance(obj[0], GitLabIssue)
self.assertIsInstance(obj[1], GitLabComment)
def test_unsupported_comment(self):
data = self.default_data
data['object_attributes']['noteable_type'] = 'Snippet'
with self.assertRaises(NotImplementedError):
list(self.gl.handle_webhook('Note Hook', data))
def test_pr_comment(self):
data = self.default_data
del data['project']
data['object_attributes']['noteable_type'] = 'MergeRequest'
for event, obj in self.gl.handle_webhook('Note Hook', data):
self.assertEqual(event, MergeRequestActions.COMMENTED)
self.assertIsInstance(obj[0], GitLabMergeRequest)
self.assertIsInstance(obj[1], GitLabComment)
def test_status(self):
del self.default_data['project']
del self.default_data['object_attributes']
for event, obj in self.gl.handle_webhook('Pipeline Hook',
self.default_data):
self.assertEqual(event, PipelineActions.UPDATED)
self.assertIsInstance(obj[0], GitLabCommit)
def test_issue_label(self):
obj_attrs = self.default_data['object_attributes']
obj_attrs.update({'action': 'update'})
self.default_data.update({
'object_attributes': obj_attrs,
'changes': {
'labels': {
'previous': [{'title': 'old'}, {'title': 'old2'}],
'current': [{'title': 'new'}],
},
},
})
unlabeled_labels = set()
labeled_labels = set()
for event, obj in self.gl.handle_webhook('Issue Hook',
self.default_data):
self.assertIsInstance(obj[0], GitLabIssue)
if event == IssueActions.LABELED:
labeled_labels.add(obj[1])
elif event == IssueActions.UNLABELED:
unlabeled_labels.add(obj[1])
self.assertEqual(unlabeled_labels, {'old', 'old2'})
self.assertEqual(labeled_labels, {'new'})
def test_merge_request_label(self):
obj_attrs = self.default_data['object_attributes']
obj_attrs.update({'action': 'update'})
self.default_data.update({
'object_attributes': obj_attrs,
'changes': {
'labels': {
'previous': [{'title': 'old'}, {'title': 'old2'}],
'current': [{'title': 'new'}],
},
},
})
unlabeled_labels = set()
labeled_labels = set()
for event, obj in self.gl.handle_webhook('Merge Request Hook',
self.default_data):
self.assertIsInstance(obj[0], GitLabMergeRequest)
if event == MergeRequestActions.LABELED:
labeled_labels.add(obj[1])
elif event == MergeRequestActions.UNLABELED:
unlabeled_labels.add(obj[1])
self.assertEqual(unlabeled_labels, {'old', 'old2'})
self.assertEqual(labeled_labels, {'new'})
|
[
"os.environ.get",
"IGitt.GitLab.GitLab.GitLab._get_repos_with_permissions"
] |
[((581, 620), 'os.environ.get', 'os.environ.get', (['"""GITLAB_TEST_TOKEN"""', '""""""'], {}), "('GITLAB_TEST_TOKEN', '')\n", (595, 620), False, 'import os\n'), ((2519, 2558), 'os.environ.get', 'os.environ.get', (['"""GITLAB_TEST_TOKEN"""', '""""""'], {}), "('GITLAB_TEST_TOKEN', '')\n", (2533, 2558), False, 'import os\n'), ((1551, 1611), 'IGitt.GitLab.GitLab.GitLab._get_repos_with_permissions', 'GitLab._get_repos_with_permissions', (['repos', 'AccessLevel.ADMIN'], {}), '(repos, AccessLevel.ADMIN)\n', (1585, 1611), False, 'from IGitt.GitLab.GitLab import GitLab\n')]
|
"""
@brief test tree node (time=5s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import add_missing_development_version
class TestReferences(unittest.TestCase):
def test_references(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
paths = add_missing_development_version("pyquickhelper", __file__)
assert len(paths) <= 1 # no added paths if no need to add a path
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"pyquickhelper.loghelper.fLOG",
"pyquickhelper.pycode.add_missing_development_version"
] |
[((567, 582), 'unittest.main', 'unittest.main', ([], {}), '()\n', (580, 582), False, 'import unittest\n'), ((274, 346), 'pyquickhelper.loghelper.fLOG', 'fLOG', (['__file__', 'self._testMethodName'], {'OutputPrint': "(__name__ == '__main__')"}), "(__file__, self._testMethodName, OutputPrint=__name__ == '__main__')\n", (278, 346), False, 'from pyquickhelper.loghelper import fLOG\n'), ((401, 459), 'pyquickhelper.pycode.add_missing_development_version', 'add_missing_development_version', (['"""pyquickhelper"""', '__file__'], {}), "('pyquickhelper', __file__)\n", (432, 459), False, 'from pyquickhelper.pycode import add_missing_development_version\n')]
|
from collections import deque
import numpy as np
class Logger:
"""Print recorded values."""
def __init__(self, name):
"""
:param name str: identifier for printed value
"""
self.name = name
def __call__(self, value):
print("{}: {}".format(self.name, value))
class WindowFilterLogger:
"""Filter and print recorded values."""
def __init__(self, name, filter_size):
"""
:param name str: identifier for printed value
:param filter_size: number of historic samples which are averaged.
No output until filter_size number of values have been recorded.
"""
self.name = name
self.values = deque(maxlen=filter_size)
def __call__(self, value):
self.values.append(value)
if len(self.values) == self.values.maxlen:
print("{}: {}".format(self.name, np.mean(self.values)))
|
[
"numpy.mean",
"collections.deque"
] |
[((724, 749), 'collections.deque', 'deque', ([], {'maxlen': 'filter_size'}), '(maxlen=filter_size)\n', (729, 749), False, 'from collections import deque\n'), ((913, 933), 'numpy.mean', 'np.mean', (['self.values'], {}), '(self.values)\n', (920, 933), True, 'import numpy as np\n')]
|
from ctypes import *
from helper.types import GoString
import json
class Settings:
def __init__(self, lib):
self.lib = lib
self.lib.GetSettings.argtypes = []
self.lib.GetSettings.restype = c_char_p
def get_settings(self):
ret = self.lib.GetSettings()
return json.loads(ret)
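# Usage sketch (library name is hypothetical; assumes a Go shared library built
# with cgo that exports GetSettings, matching the signature configured above):
#   lib = CDLL("./libhelper.so")
#   settings = Settings(lib).get_settings()  # parsed JSON as Python objects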
|
[
"json.loads"
] |
[((312, 327), 'json.loads', 'json.loads', (['ret'], {}), '(ret)\n', (322, 327), False, 'import json\n')]
|
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import random
def karatsuba(x, y):
""" Recursive implementation of Karatsuba's Fast Mulciplication Algoritihm
:param x: The first integer
:param y: The second integer
:return: The product of x * y
"""
if x < 10 or y < 10:
return x*y
m = max(len(str(x)), len(str(y))) // 2
x_high = x // 10**m
x_low = x % 10**m
y_high = y // 10**m
y_low = y % 10**m
z0 = karatsuba(x_low, y_low)
z1 = karatsuba(x_low + x_high, y_low + y_high)
z2 = karatsuba(x_high, y_high)
return z2 * 10 ** (2 * m) + (z1 - z2 - z0) * 10 ** m + z0
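# Why the recombination step above works: writing x = x_high * 10**m + x_low and
# y = y_high * 10**m + y_low gives
#   x * y = z2 * 10**(2*m) + (x_high*y_low + x_low*y_high) * 10**m + z0,
# and the middle term costs only one extra recursive product because
#   z1 - z2 - z0 = (x_low + x_high)*(y_low + y_high) - x_high*y_high - x_low*y_low
#                = x_high*y_low + x_low*y_high.
# Quick sanity check (illustrative): karatsuba(1234, 5678) == 1234 * 5678.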
def karat_compare(max_size, tests):
samples = []
test_sizes = np.linspace(1,max_size, tests).astype(int)
standard_results = []
karatsuba_results = []
for test_size in test_sizes:
x_str = ''
y_str = ''
for x in range(test_size):
x_str += str(random.randint(0,9))
y_str += str(random.randint(0,9))
samples.append((int(x_str), int(y_str)))
print(f"Samples Generated: {len(samples)}, with max size: {max_size}")
for sample, test_size in zip(samples, test_sizes):
print(f"Attempting numbers of 10^{test_size}")
x = sample[0]
y = sample[1]
t_start = time.perf_counter()
r = x * y
standard_results.append(time.perf_counter() - t_start)
t_start = time.perf_counter()
r = karatsuba(x, y)
karatsuba_results.append(time.perf_counter() - t_start)
plt.plot(test_sizes, standard_results, label="python native")
plt.plot(test_sizes, karatsuba_results, label="karatsuba")
plt.xlabel("10^x")
plt.ylabel("Seconds")
plt.legend()
plt.show()
def naive_matrix_multiplication_lists(a, b):
"""
Uses nested loops to calculate AB
:param a: An MxN matrix of numbers.
:param b: An NxP matrix of numbers.
:return: An MxP matrix of numbers which is the product: AB.
"""
M = len(a)
N = len(a[0])
if len(b) != N:
raise ValueError("The Matrices Provide are not the proper shape.")
P = len(b[0])
c = [[0 for i in range(P)] for j in range(M)]
for i in range(0,M):
for j in range(0,P):
for k in range(0,N):
c[i][j] += a[i][k] * b[k][j]
return c
def naive_matrix_multiplication_np(a,b):
M, N = a.shape
n, P = b.shape
if N != n:
raise ValueError("The Matrices Provide are not the proper shape.")
c = np.zeros((M,P))
for i in range(0,M):
for j in range(0,P):
for k in range(0,N):
c[i][j] += a[i][k] * b[k][j]
return c
if __name__ == "__main__":
a = [[1, 2, 5],
[3, 4, 6]]
b = [[5, 6],
[7, 8],
[1, 1]]
c = naive_matrix_multiplication_lists(a, b)
print("List Results:\n", c)
A = np.array(a)
B = np.array(b)
C = naive_matrix_multiplication_np(A, B)
print("NP Array Results:\n", C)
expected_results = np.array([[24, 27], [49, 56]])
print("Expected Results:\n", expected_results)
|
[
"matplotlib.pyplot.show",
"random.randint",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"time.perf_counter",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1572, 1632), 'matplotlib.pyplot.plot', 'plt.plot', (['test_size', 'standard_results'], {'label': '"""python native"""'}), "(test_size, standard_results, label='python native')\n", (1580, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1694), 'matplotlib.pyplot.plot', 'plt.plot', (['test_size', 'karatsuba_results'], {'label': '"""karatsuba"""'}), "(test_size, karatsuba_results, label='karatsuba')\n", (1645, 1694), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1717), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""10^x"""'], {}), "('10^x')\n", (1709, 1717), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds"""'], {}), "('Seconds')\n", (1732, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1760), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1758, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1773, 1775), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2561), 'numpy.zeros', 'np.zeros', (['(M, P)'], {}), '((M, P))\n', (2553, 2561), True, 'import numpy as np\n'), ((2920, 2931), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2928, 2931), True, 'import numpy as np\n'), ((2940, 2951), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (2948, 2951), True, 'import numpy as np\n'), ((3057, 3087), 'numpy.array', 'np.array', (['[[24, 27], [49, 56]]'], {}), '([[24, 27], [49, 56]])\n', (3065, 3087), True, 'import numpy as np\n'), ((1335, 1354), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1352, 1354), False, 'import time\n'), ((1455, 1474), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1472, 1474), False, 'import time\n'), ((743, 774), 'numpy.linspace', 'np.linspace', (['(1)', 'max_size', 'tests'], {}), '(1, max_size, tests)\n', (754, 774), True, 'import numpy as np\n'), ((970, 990), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (984, 990), False, 'import random\n'), ((1016, 1036), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1030, 1036), False, 'import random\n'), ((1405, 1424), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1422, 1424), False, 'import time\n'), ((1536, 1555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1553, 1555), False, 'import time\n')]
|
import dlib
import cv2
from os import path
from dataflow import ports
class ObjectDetectorOpencv:
def __init__(self, model):
self.detector = cv2.CascadeClassifier(model)
self.source_detection = ports.EventSource()
def detect_object(self, img):
detect = self.detector.detectMultiScale(img, 1.2, 7, 0, (50, 50))
if len(detect) > 0:
(x, y, w, h) = detect[0]
self.source_detection.fire(dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
class MultiObjectDetectorOpencv:
def __init__(self, models):
self.detectors = []
for model in models:
self.detectors.append(cv2.CascadeClassifier(model))
self.source_detection = ports.EventSource()
def detect_object(self, img):
for detector in self.detectors:
detect = detector.detectMultiScale(img, 1.1, 7, 0, (50, 50))
if len(detect) > 0:
(x, y, w, h) = detect[0]
self.source_detection.fire(dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
break
def FaceDetectorOpencv():
return ObjectDetectorOpencv(
path.join(path.dirname(path.abspath(__file__)), 'models/haarcascade_frontalface_alt2.xml'))
def HandDetectorOpencv():
return MultiObjectDetectorOpencv(
(
path.join(path.dirname(path.abspath(__file__)), 'models/palm.xml'),
path.join(path.dirname(path.abspath(__file__)), 'models/fist.xml'),
path.join(path.dirname(path.abspath(__file__)), 'models/closed_frontal_palm.xml'),
path.join(path.dirname(path.abspath(__file__)), 'models/aGest.xml')
))
|
[
"dataflow.ports.EventSource",
"os.path.abspath",
"cv2.CascadeClassifier"
] |
[((155, 183), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['model'], {}), '(model)\n', (176, 183), False, 'import cv2\n'), ((216, 235), 'dataflow.ports.EventSource', 'ports.EventSource', ([], {}), '()\n', (233, 235), False, 'from dataflow import ports\n'), ((725, 744), 'dataflow.ports.EventSource', 'ports.EventSource', ([], {}), '()\n', (742, 744), False, 'from dataflow import ports\n'), ((663, 691), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['model'], {}), '(model)\n', (684, 691), False, 'import cv2\n'), ((1179, 1201), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1191, 1201), False, 'from os import path\n'), ((1359, 1381), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1371, 1381), False, 'from os import path\n'), ((1439, 1461), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1451, 1461), False, 'from os import path\n'), ((1519, 1541), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1531, 1541), False, 'from os import path\n'), ((1614, 1636), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1626, 1636), False, 'from os import path\n')]
|
#=========================================================================
# VcdGenerationPass_test.py
#=========================================================================
# Perform limited tests on the VCD generation pass. These tests are limited
# in the sense that they do not compare the entire output against some
# reference output, which is hard to obtain in the case of VCD generation.
# Our goal is to have some regression test cases that can hopefully inform
# us of any incompatible changes that lead to the failure of VCD generation
# during a major update of PyMTL.
#
# Author: <NAME>
# Date: Nov 1, 2019
from pymtl3.datatypes import *
from pymtl3.dsl import *
from pymtl3.passes import TracingConfigs
from pymtl3.passes.PassGroups import SimulationPass
def run_test( dut, tv, tv_in, tv_out ):
vcd_file_name = dut.__class__.__name__ + "_funky"
dut.config_tracing = TracingConfigs( tracing='vcd', vcd_file_name=vcd_file_name )
dut.elaborate()
dut.apply( SimulationPass() )
for v in tv:
tv_in( dut, v )
dut.tick()
tv_out( dut, v )
with open(vcd_file_name+".vcd") as fd:
file_str = ''.join( fd.readlines() )
all_signals = dut.get_input_value_ports() + \
dut.get_output_value_ports() + \
dut.get_wires()
for signal in all_signals:
assert signal._dsl.my_name in file_str
def test_vector_signals():
class A( Component ):
def construct( s ):
s.in0 = InPort( Bits32 )
s.in1 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def add_upblk():
s.out = s.in0 + s.in1
def tv_in( m, tv ):
m.in0 = tv[0]
m.in1 = tv[1]
def tv_out( m, tv ):
assert m.out == tv[2]
run_test( A(), [
# in0 in1 out
[ b32(0), b32(-1), b32(-1), ],
[ b32(1), b32(1), b32(2), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(42), b32(42), b32(84), ],
], tv_in, tv_out )
def test_bitstruct_signals():
bs = mk_bitstruct( "BitStructType", {
'foo' : Bits1,
'bar' : Bits32,
} )
class A2( Component ):
def construct( s ):
s.in0 = InPort( bs )
s.in1 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def add_upblk():
s.out = s.in0.bar + s.in1
def tv_in( m, tv ):
m.in0 = tv[0]
m.in1 = tv[1]
def tv_out( m, tv ):
assert m.out == tv[2]
run_test( A2(), [
# in0 in1 out
[ bs(b1(0), b32(0)), b32(-1), b32(-1), ],
[ bs(b1(0), b32(1)), b32(1), b32(2), ],
[ bs(b1(0), b32(-1)), b32(0), b32(-1), ],
[ bs(b1(0), b32(42)), b32(42), b32(84), ],
], tv_in, tv_out )
|
[
"pymtl3.passes.TracingConfigs",
"pymtl3.passes.PassGroups.SimulationPass"
] |
[((893, 951), 'pymtl3.passes.TracingConfigs', 'TracingConfigs', ([], {'tracing': '"""vcd"""', 'vcd_file_name': 'vcd_file_name'}), "(tracing='vcd', vcd_file_name=vcd_file_name)\n", (907, 951), False, 'from pymtl3.passes import TracingConfigs\n'), ((985, 1001), 'pymtl3.passes.PassGroups.SimulationPass', 'SimulationPass', ([], {}), '()\n', (999, 1001), False, 'from pymtl3.passes.PassGroups import SimulationPass\n')]
|
import click
import sys
from collections import namedtuple
from random import randint
Ctx = namedtuple('Ctx', ['ctl', 'ssh', 'ssh_cfg'])
@click.group()
@click.pass_context
@click.option('--host', default='vdi.nci.org.au', help='Customize vdi login node')
@click.option('--user', help='SSH user name, if not given will be read from ~/.ssh/config')
@click.option('--no-ask', is_flag=True, help='Do not ask for passwords')
def cli(ctx, host, user, no_ask):
""" Control and query info about VDI sessions
"""
from ._ssh import open_ssh
from .vdi import vdi_ctl
try:
ssh, ssh_cfg = open_ssh(host, user, no_ask=no_ask)
except:
click.echo('Failed to connect to "{}{}"'.format(user+'@' if user else '', host))
ctx.exit()
ctl = vdi_ctl(ssh)
ctx.obj = Ctx(ssh=ssh, ssh_cfg=ssh_cfg, ctl=ctl)
@cli.command('launch')
@click.pass_obj
@click.option('--force', is_flag=True, help='Launch new session even if one is already running')
def launch(ctx, force):
""" Launch session if not running
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) != 0 and not force:
click.echo('Job already running', err=True)
sys.exit(1)
job = ctl('launch', '--partition', 'main')
click.echo(job.get('id'))
return 0
@cli.command('terminate')
@click.pass_obj
def terminate(ctx):
""" Shutdown session (all sessions actually)
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
for job in jobs:
jobid = job['id']
click.echo('Terminating {}'.format(jobid))
ctl('terminate', '--jobid', jobid)
@cli.command('host')
@click.pass_obj
def hostname(ctx):
""" Print hostname for every active session
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
host = ctl('get-host', '--jobid', job['id']).get('host')
click.echo(host)
return 0
@cli.command('get-passwd')
@click.pass_obj
def get_passwd(ctx):
""" Print VNC password
"""
ctl = ctx.ctl
password = ctl('get-passwd').get('passwd')
if password is None:
click.echo('Failed to query VNC password', err=True)
sys.exit(1)
click.echo(password)
return 0
def collect_vnc_info(ctl, job_id, ssh_cfg):
from ._ssh import mk_ssh
from .vdi import vdi_ctl
cfg = dict(**ssh_cfg)
host = ctl('get-host', '--jobid', job_id).get('host')
passwd = ctl('get-passwd').get('passwd')
cfg['hostname'] = host
try:
client_ctl = vdi_ctl(mk_ssh(cfg))
except:
click.echo('Failed to connect to {}'.format(host), err=True)
sys.exit(2)
display = client_ctl('get-display-nbr').get('display')
if display is None:
click.echo('Failed to query display {}'.format(host), err=True)
sys.exit(3)
try:
display = int(display[1:]) # Parse `:2`
except ValueError:
click.echo('Failed to parse display number: "{}"'.format(display))
sys.exit(3)
return dict(host=host,
display=display,
port=display+5900,
passwd=passwd)
def get_vnc_tunnel_cmd(ctx, job_id, local_port):
v_map = {True: 'yes', False: 'no'}
opts = dict(
PasswordAuthentication=False,
ChallengeResponseAuthentication=False,
KbdInteractiveAuthentication=False,
PubkeyAuthentication=True,
StrictHostKeyChecking=True,
)
args = ['-T'] + ['-o{}={}'.format(k, v_map.get(v, v))
for k, v in opts.items()]
cmd = '/opt/vdi/bin/session-ctl --configver=20173552330 tunnel'.split(' ')
user = ctx.ssh_cfg.get('user')
if user is not None:
args.extend(['-l', user])
info = collect_vnc_info(ctx.ctl, job_id, ctx.ssh_cfg)
fwd_args = ['-L',
'{local_port}:127.0.0.1:{remote_port} {host}'.format(
local_port=local_port,
remote_port=info['port'],
host=info['host'])]
return ['ssh'] + args + fwd_args + cmd
@cli.command('display-nbr')
@click.option('--as-port', is_flag=True, help='Print it as a port number of the VNC server')
@click.pass_obj
def display_nbr(ctx, as_port=False):
""" Print display number for active session (s)
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
info = collect_vnc_info(ctl, job['id'], ctx.ssh_cfg)
if as_port:
click.echo('%d' % info['port'])
else:
click.echo(':%d' % info['display'])
@cli.command('vnc-tunnel-cmd')
@click.option('--local-port', type=int, default=0, help='Local port to use for ssh forwarding')
@click.pass_obj
def vnc_tunnel_cmd(ctx, local_port=0):
""" Print port forwarding command
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
local_port = local_port or randint(10000, 65000)
for job in jobs:
cmd = get_vnc_tunnel_cmd(ctx, job['id'], local_port)
click.echo(' '.join(cmd))
@cli.command('nbconnect')
@click.option('--local-port', type=int, default=0, help='Local port to use for ssh forwarding')
@click.option('--runtime-dir', help='Jupyter runtime dir on a remote `jupyter --runtime-dir`')
@click.pass_obj
def nbconnect(ctx, local_port=0, runtime_dir=None):
""" Connect to notebook on VDI
"""
from ._ssh import mk_ssh
from .nbconnect import run_nb_tunnel
ctl = ctx.ctl
ssh_cfg = ctx.ssh_cfg
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
host = ctl('get-host', '--jobid', job['id']).get('host')
ssh_cfg['hostname'] = host
try:
ssh = mk_ssh(ssh_cfg)
except:
click.echo('Failed to connect to {}'.format(host))
sys.exit(2)
sys.exit(run_nb_tunnel(ssh, ssh_cfg, runtime_dir=runtime_dir, local_port=local_port))
def _cli():
cli(obj={})
if __name__ == '__main__':
_cli()
|
[
"random.randint",
"click.option",
"click.echo",
"collections.namedtuple",
"click.group",
"sys.exit"
] |
[((93, 137), 'collections.namedtuple', 'namedtuple', (['"""Ctx"""', "['ctl', 'ssh', 'ssh_cfg']"], {}), "('Ctx', ['ctl', 'ssh', 'ssh_cfg'])\n", (103, 137), False, 'from collections import namedtuple\n'), ((141, 154), 'click.group', 'click.group', ([], {}), '()\n', (152, 154), False, 'import click\n'), ((176, 262), 'click.option', 'click.option', (['"""--host"""'], {'default': '"""vdi.nci.org.au"""', 'help': '"""Customize vdi login node"""'}), "('--host', default='vdi.nci.org.au', help=\n 'Customize vdi login node')\n", (188, 262), False, 'import click\n'), ((259, 354), 'click.option', 'click.option', (['"""--user"""'], {'help': '"""SSH user name, if not given will be read from ~/.ssh/config"""'}), "('--user', help=\n 'SSH user name, if not given will be read from ~/.ssh/config')\n", (271, 354), False, 'import click\n'), ((351, 422), 'click.option', 'click.option', (['"""--no-ask"""'], {'is_flag': '(True)', 'help': '"""Do not ask for passwords"""'}), "('--no-ask', is_flag=True, help='Do not ask for passwords')\n", (363, 422), False, 'import click\n'), ((884, 984), 'click.option', 'click.option', (['"""--force"""'], {'is_flag': '(True)', 'help': '"""Launch new session even if one is already running"""'}), "('--force', is_flag=True, help=\n 'Launch new session even if one is already running')\n", (896, 984), False, 'import click\n'), ((4261, 4357), 'click.option', 'click.option', (['"""--as-port"""'], {'is_flag': '(True)', 'help': '"""Print it as a port number of the VNC server"""'}), "('--as-port', is_flag=True, help=\n 'Print it as a port number of the VNC server')\n", (4273, 4357), False, 'import click\n'), ((4888, 4987), 'click.option', 'click.option', (['"""--local-port"""'], {'type': 'int', 'default': '(0)', 'help': '"""Local port to use for ssh forwarding"""'}), "('--local-port', type=int, default=0, help=\n 'Local port to use for ssh forwarding')\n", (4900, 4987), False, 'import click\n'), ((5462, 5561), 'click.option', 'click.option', (['"""--local-port"""'], {'type': 'int', 'default': '(0)', 'help': '"""Local port to use for ssh forwarding"""'}), "('--local-port', type=int, default=0, help=\n 'Local port to use for ssh forwarding')\n", (5474, 5561), False, 'import click\n'), ((5558, 5656), 'click.option', 'click.option', (['"""--runtime-dir"""'], {'help': '"""Jupyter runtime dir on a remote `jupyter --runtime-dir`"""'}), "('--runtime-dir', help=\n 'Jupyter runtime dir on a remote `jupyter --runtime-dir`')\n", (5570, 5656), False, 'import click\n'), ((2382, 2402), 'click.echo', 'click.echo', (['password'], {}), '(password)\n', (2392, 2402), False, 'import click\n'), ((1181, 1224), 'click.echo', 'click.echo', (['"""Job already running"""'], {'err': '(True)'}), "('Job already running', err=True)\n", (1191, 1224), False, 'import click\n'), ((1233, 1244), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1241, 1244), False, 'import sys\n'), ((1917, 1956), 'click.echo', 'click.echo', (['"""No jobs running"""'], {'err': '(True)'}), "('No jobs running', err=True)\n", (1927, 1956), False, 'import click\n'), ((1965, 1976), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1973, 1976), False, 'import sys\n'), ((2072, 2088), 'click.echo', 'click.echo', (['host'], {}), '(host)\n', (2082, 2088), False, 'import click\n'), ((2304, 2356), 'click.echo', 'click.echo', (['"""Failed to query VNC password"""'], {'err': '(True)'}), "('Failed to query VNC password', err=True)\n", (2314, 2356), False, 'import click\n'), ((2365, 2376), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2373, 2376), False, 'import 
sys\n'), ((2994, 3005), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (3002, 3005), False, 'import sys\n'), ((4584, 4623), 'click.echo', 'click.echo', (['"""No jobs running"""'], {'err': '(True)'}), "('No jobs running', err=True)\n", (4594, 4623), False, 'import click\n'), ((4632, 4643), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4640, 4643), False, 'import sys\n'), ((5202, 5241), 'click.echo', 'click.echo', (['"""No jobs running"""'], {'err': '(True)'}), "('No jobs running', err=True)\n", (5212, 5241), False, 'import click\n'), ((5250, 5261), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5258, 5261), False, 'import sys\n'), ((5294, 5315), 'random.randint', 'randint', (['(10000)', '(65000)'], {}), '(10000, 65000)\n', (5301, 5315), False, 'from random import randint\n'), ((5978, 6017), 'click.echo', 'click.echo', (['"""No jobs running"""'], {'err': '(True)'}), "('No jobs running', err=True)\n", (5988, 6017), False, 'import click\n'), ((6026, 6037), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6034, 6037), False, 'import sys\n'), ((2818, 2829), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2826, 2829), False, 'import sys\n'), ((3171, 3182), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (3179, 3182), False, 'import sys\n'), ((4760, 4791), 'click.echo', 'click.echo', (["('%d' % info['port'])"], {}), "('%d' % info['port'])\n", (4770, 4791), False, 'import click\n'), ((4818, 4853), 'click.echo', 'click.echo', (["(':%d' % info['display'])"], {}), "(':%d' % info['display'])\n", (4828, 4853), False, 'import click\n'), ((6298, 6309), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (6306, 6309), False, 'import sys\n')]
|
import pytest
# test transforming tidb check config to openmetrics check config
from datadog_checks.base.utils.tagging import GENERIC_TAGS
from datadog_checks.tidb import TiDBCheck
from .conftest import EXPECTED_PD, EXPECTED_TIDB, EXPECTED_TIKV
@pytest.mark.unit
def test_create_check_instance_transform(tidb_instance):
check = TiDBCheck("test_config_transform", {}, [tidb_instance])
assert check.instance.get('prometheus_url') == 'http://localhost:10080/metrics'
assert check.instance.get('namespace') == 'tidb_cluster'
assert check.instance.get('tags') == ['tidb_cluster_name:test', 'tidb_cluster_component:tidb']
mapper = check.instance.get('labels_mapper')
for label in GENERIC_TAGS:
assert mapper.get(label) == label + "_in_app"
@pytest.mark.unit
def test_tidb_mock_metrics(aggregator, mock_tidb_metrics, tidb_instance):
check = TiDBCheck("test_tidb_mock_metrics", {}, [tidb_instance])
_check_and_assert(aggregator, EXPECTED_TIDB, check)
@pytest.mark.unit
def test_pd_mock_metrics(aggregator, mock_pd_metrics, pd_instance):
check = TiDBCheck("test_pd_mock_metrics", {}, [pd_instance])
_check_and_assert(aggregator, EXPECTED_PD, check)
@pytest.mark.unit
def test_tikv_mock_metrics(aggregator, mock_tikv_metrics, tikv_instance):
check = TiDBCheck("test_tidb_mock_metrics", {}, [tikv_instance])
_check_and_assert(aggregator, EXPECTED_TIKV, check)
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_cluster_metrics(aggregator, pd_instance, tikv_instance, tidb_instance):
check = TiDBCheck("test_cluster_metrics", {}, [tidb_instance])
_check_and_assert(aggregator, EXPECTED_TIDB, check)
check = TiDBCheck("test_cluster_metrics", {}, [pd_instance])
_check_and_assert(aggregator, EXPECTED_PD, check)
check = TiDBCheck("test_cluster_metrics", {}, [tikv_instance])
_check_and_assert(aggregator, EXPECTED_TIKV, check)
def _check_and_assert(agg, expected, c):
c.check(c.instance)
for name, tags in expected['metrics'].items():
agg.assert_metric(name, tags=tags)
for name, tags in expected['service_check'].items():
agg.assert_service_check(name, status=TiDBCheck.OK, tags=tags)
# since tidb cluster metrics cannot be listed thoroughly, we disable all completeness assertions here
# agg.assert_all_metrics_covered()
# agg.assert_metrics_using_metadata(get_metadata_metrics(), check_metric_type=False)
|
[
"datadog_checks.tidb.TiDBCheck",
"pytest.mark.usefixtures"
] |
[((1443, 1484), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""dd_environment"""'], {}), "('dd_environment')\n", (1466, 1484), False, 'import pytest\n'), ((336, 391), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_config_transform"""', '{}', '[tidb_instance]'], {}), "('test_config_transform', {}, [tidb_instance])\n", (345, 391), False, 'from datadog_checks.tidb import TiDBCheck\n'), ((876, 932), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_tidb_mock_metrics"""', '{}', '[tidb_instance]'], {}), "('test_tidb_mock_metrics', {}, [tidb_instance])\n", (885, 932), False, 'from datadog_checks.tidb import TiDBCheck\n'), ((1089, 1141), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_pd_mock_metrics"""', '{}', '[pd_instance]'], {}), "('test_pd_mock_metrics', {}, [pd_instance])\n", (1098, 1141), False, 'from datadog_checks.tidb import TiDBCheck\n'), ((1302, 1358), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_tidb_mock_metrics"""', '{}', '[tikv_instance]'], {}), "('test_tidb_mock_metrics', {}, [tikv_instance])\n", (1311, 1358), False, 'from datadog_checks.tidb import TiDBCheck\n'), ((1578, 1632), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_cluster_metrics"""', '{}', '[tidb_instance]'], {}), "('test_cluster_metrics', {}, [tidb_instance])\n", (1587, 1632), False, 'from datadog_checks.tidb import TiDBCheck\n'), ((1701, 1753), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_cluster_metrics"""', '{}', '[pd_instance]'], {}), "('test_cluster_metrics', {}, [pd_instance])\n", (1710, 1753), False, 'from datadog_checks.tidb import TiDBCheck\n'), ((1820, 1874), 'datadog_checks.tidb.TiDBCheck', 'TiDBCheck', (['"""test_cluster_metrics"""', '{}', '[tikv_instance]'], {}), "('test_cluster_metrics', {}, [tikv_instance])\n", (1829, 1874), False, 'from datadog_checks.tidb import TiDBCheck\n')]
|
import pytest
from requre.online_replacing import record_requests_for_all_methods
from tests.integration.pagure.base import PagureTests
from ogr.exceptions import OgrException
@record_requests_for_all_methods()
class Service(PagureTests):
def test_project_create(self):
"""
Remove https://pagure.io/"name" before data regeneration
        in case you are not the owner of the repo, create your own
"""
name = "new-ogr-testing-repo-jscotka"
project = self.service.get_project(repo=name, namespace=None)
assert not project.exists()
new_project = self.service.project_create(repo=name)
assert new_project.exists()
assert new_project.repo == name
project = self.service.get_project(repo=name, namespace=None)
assert project.exists()
def test_project_create_with_description(self):
"""
Remove https://pagure.io/"name" before data regeneration
        in case you are not the owner of the repo, create your own
"""
name = "new-ogr-testing-repo-with-description"
description = "The description of the newly created project."
project = self.service.get_project(repo=name, namespace=None)
assert not project.exists()
new_project = self.service.project_create(repo=name, description=description)
assert new_project.exists()
assert new_project.repo == name
assert new_project.get_description() == description
project = self.service.get_project(repo=name, namespace=None)
assert project.exists()
assert new_project.get_description() == description
def test_project_create_in_the_group(self):
"""
Remove https://pagure.io/packit-service/new-ogr-testing-repo-in-the-group
        before data regeneration if you have the rights to remove it; otherwise,
        create your own suffix
"""
name = "new-ogr-testing-repo-in-the-group-jscotka"
namespace = "packit-service"
project = self.service.get_project(repo=name, namespace=namespace)
assert not project.exists()
new_project = self.service.project_create(repo=name, namespace=namespace)
assert new_project.exists()
assert new_project.repo == name
project = self.service.get_project(repo=name, namespace=namespace)
assert project.exists()
def test_project_create_invalid_namespace(self):
name = "new-ogr-testing-repo"
namespace = "nonexisting"
with pytest.raises(OgrException, match=r".*Namespace doesn't exist.*"):
self.service.project_create(repo=name, namespace=namespace)
project = self.service.get_project(repo=name, namespace=namespace)
assert not project.exists()
def test_project_create_unauthorized_namespace(self):
name = "new-ogr-testing-repo"
namespace = "fedora-magazine"
with pytest.raises(
OgrException, match=r".*Cannot create project in given namespace.*"
):
self.service.project_create(repo=name, namespace=namespace)
project = self.service.get_project(repo=name, namespace=namespace)
assert not project.exists()
|
[
"pytest.raises",
"requre.online_replacing.record_requests_for_all_methods"
] |
[((180, 213), 'requre.online_replacing.record_requests_for_all_methods', 'record_requests_for_all_methods', ([], {}), '()\n', (211, 213), False, 'from requre.online_replacing import record_requests_for_all_methods\n'), ((2505, 2569), 'pytest.raises', 'pytest.raises', (['OgrException'], {'match': '""".*Namespace doesn\'t exist.*"""'}), '(OgrException, match=".*Namespace doesn\'t exist.*")\n', (2518, 2569), False, 'import pytest\n'), ((2904, 2990), 'pytest.raises', 'pytest.raises', (['OgrException'], {'match': '""".*Cannot create project in given namespace.*"""'}), "(OgrException, match=\n '.*Cannot create project in given namespace.*')\n", (2917, 2990), False, 'import pytest\n')]
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class JudgementResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'output': 'str',
'file_id': 'str',
'image_id': 'str',
'case_count': 'int',
'executed_count': 'int',
'testcases': 'list[JudgementCaseResult]'
}
attribute_map = {
'output': 'output',
'file_id': 'file_id',
'image_id': 'image_id',
'case_count': 'case_count',
'executed_count': 'executed_count',
'testcases': 'testcases'
}
def __init__(self, output=None, file_id=None, image_id=None, case_count=None, executed_count=None, testcases=None):
"""JudgementResult - a model defined in huaweicloud sdk"""
self._output = None
self._file_id = None
self._image_id = None
self._case_count = None
self._executed_count = None
self._testcases = None
self.discriminator = None
self.output = output
self.file_id = file_id
self.image_id = image_id
self.case_count = case_count
self.executed_count = executed_count
self.testcases = testcases
@property
def output(self):
"""Gets the output of this JudgementResult.
        Standard-type output result
:return: The output of this JudgementResult.
:rtype: str
"""
return self._output
@output.setter
def output(self, output):
"""Sets the output of this JudgementResult.
        Standard-type output result
:param output: The output of this JudgementResult.
:type: str
"""
self._output = output
@property
def file_id(self):
"""Gets the file_id of this JudgementResult.
        File id for output in file form; the details can be downloaded using the file id
:return: The file_id of this JudgementResult.
:rtype: str
"""
return self._file_id
@file_id.setter
def file_id(self, file_id):
"""Sets the file_id of this JudgementResult.
        File id for output in file form; the details can be downloaded using the file id
:param file_id: The file_id of this JudgementResult.
:type: str
"""
self._file_id = file_id
@property
def image_id(self):
"""Gets the image_id of this JudgementResult.
        Image id for output in image form; the details can be downloaded using the image id
:return: The image_id of this JudgementResult.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""Sets the image_id of this JudgementResult.
        Image id for output in image form; the details can be downloaded using the image id
:param image_id: The image_id of this JudgementResult.
:type: str
"""
self._image_id = image_id
@property
def case_count(self):
"""Gets the case_count of this JudgementResult.
        Total number of test cases for output in test-case form
:return: The case_count of this JudgementResult.
:rtype: int
"""
return self._case_count
@case_count.setter
def case_count(self, case_count):
"""Sets the case_count of this JudgementResult.
        Total number of test cases for output in test-case form
:param case_count: The case_count of this JudgementResult.
:type: int
"""
self._case_count = case_count
@property
def executed_count(self):
"""Gets the executed_count of this JudgementResult.
        Number of executed test cases for output in test-case form
:return: The executed_count of this JudgementResult.
:rtype: int
"""
return self._executed_count
@executed_count.setter
def executed_count(self, executed_count):
"""Sets the executed_count of this JudgementResult.
        Number of executed test cases for output in test-case form
:param executed_count: The executed_count of this JudgementResult.
:type: int
"""
self._executed_count = executed_count
@property
def testcases(self):
"""Gets the testcases of this JudgementResult.
        Results of the executed test cases for output in test-case form
:return: The testcases of this JudgementResult.
:rtype: list[JudgementCaseResult]
"""
return self._testcases
@testcases.setter
def testcases(self, testcases):
"""Sets the testcases of this JudgementResult.
        Results of the executed test cases for output in test-case form
:param testcases: The testcases of this JudgementResult.
:type: list[JudgementCaseResult]
"""
self._testcases = testcases
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JudgementResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization",
"six.iteritems",
"sys.setdefaultencoding"
] |
[((4775, 4808), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (4788, 4808), False, 'import six\n'), ((5793, 5824), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (5815, 5824), False, 'import sys\n'), ((5851, 5883), 'huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization', 'sanitize_for_serialization', (['self'], {}), '(self)\n', (5877, 5883), False, 'from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n')]
|
import math
import os
from random import random
import psycopg2
import psycopg2.extras
from config.config import config
from models.Club import Club
from models.Player import Player
from models.Position import Position
from models.Tournament import Tournament
from faker import Faker
class FootballDatabase(object):
def __init__(self):
self.conn = None
def exec_script_file(self, script_file_name: str) -> None:
script_file = open('{0}\scripts\{1}'.format(os.path.dirname(__file__), script_file_name), 'r')
with self.get_cursor() as cur:
cur.execute(script_file.read())
self.conn.commit()
def get_cursor(self):
return self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
def connect(self) -> None:
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
self.conn = psycopg2.connect(**params)
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def close_connection(self) -> None:
if self.conn is not None:
self.conn.close()
print('Database connection closed.')
def generate_random_players(self):
fake = Faker()
script = """INSERT INTO players(first_name, last_name, date_of_birth, is_injured, height, position_id, club_id)
VALUES(%s, %s, %s, %s, %s, %s, (SELECT club_id FROM clubs ORDER BY random() LIMIT 1));"""
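        # each generated player gets a ~50% injury chance, a height of roughly 160-199 cm, and a club picked at random by the SQL subquery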
with self.get_cursor() as cur:
for i in range(1000):
cur.execute(script, [fake.first_name_male(),
fake.last_name_male(),
fake.date_of_birth(tzinfo=None, minimum_age=17, maximum_age=35),
random() > 0.5,
math.ceil(random() * 39 + 160),
math.ceil(random() * 3 + 1)])
def generate_random_clubs(self):
fake = Faker()
clubs_amount = 100
club_names = fake.words(nb=clubs_amount, ext_word_list=None, unique=True)
script = """INSERT INTO clubs(name, creation_date, number_of_trophies) VALUES (%s, %s, %s);"""
with self.get_cursor() as cur:
for i in range(clubs_amount):
cur.execute(script, [club_names[i],
fake.date_of_birth(tzinfo=None, minimum_age=5, maximum_age=200),
math.ceil(random() * 29 + 1)])
def generate_random_tournaments(self):
fake = Faker()
script = """INSERT INTO tournaments(name, description) VALUES (%s, %s);"""
with self.get_cursor() as cur:
for i in range(20):
cur.execute(script, [fake.word(), fake.text()])
def text_search_by_words(self, words: list) -> list:
search_words = ' & '.join(words)
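        # PostgreSQL full-text search: to_tsquery parses the '&'-joined terms and ts_headline highlights the matches in the description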
script = """SELECT id, ts_headline('english', description, q) description, name
FROM (SELECT tournament_id id, description, name, q
FROM tournaments, to_tsquery('english', %s) q
WHERE tsv @@ q) AS t;"""
with self.get_cursor() as cur:
cur.execute(script, [search_words])
tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name'], description=t['description']) for t in tournaments]
def text_search_by_phrase(self, phrase: str) -> list:
script = """SELECT id, ts_headline('english', description, q) description, name
FROM (SELECT tournament_id id, description, name, q
FROM tournaments, phraseto_tsquery('english', %s) q
WHERE tsv @@ q) AS t;"""
with self.get_cursor() as cur:
cur.execute(script, [phrase])
tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name'], description=t['description']) for t in tournaments]
def advanced_player_search(self,
min_height: int,
max_height: int,
min_number: int,
max_number: int,
position_id: int) -> list:
script = """
SELECT p.first_name, p.last_name, p.height, c.name as club , c.number_of_trophies, pos.name as position
FROM players p
JOIN clubs c
ON p.club_id = c.club_id
JOIN positions pos
ON p.position_id = pos.position_id
WHERE (p.height BETWEEN %s AND %s)
AND (c.number_of_trophies BETWEEN %s AND %s)
AND p.position_id = %s;"""
with self.get_cursor() as cur:
cur.execute(script, [min_height, max_height, min_number, max_number, position_id])
rows = cur.fetchall()
return [(Club(name=r['club'], number_of_trophies=r['number_of_trophies']),
Player(first_name=r['first_name'], last_name=r['last_name'], height=r['height']),
Position(name=r['position'])) for r in rows]
# region Positions operations
def get_positions(self):
script = """
SELECT position_id, name
FROM positions"""
with self.get_cursor() as cur:
cur.execute(script)
positions = cur.fetchall()
return [Position(id=p['position_id'], name=p['name']) for p in positions]
# endregion
# region Players operations
def get_player(self, player_id: int) -> Player:
script = """
SELECT * FROM players
WHERE player_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [player_id])
db_player = cur.fetchone()
return Player(id=db_player['player_id'], first_name=db_player['first_name'], last_name=db_player['last_name'],
date_of_birth=db_player['date_of_birth'], is_injured=db_player['is_injured'],
height=db_player['height'], club_id=db_player['club_id'], position_id=db_player['position_id'])
def get_players(self) -> list:
with self.get_cursor() as cur:
cur.execute('SELECT player_id, first_name, last_name FROM players')
db_players = cur.fetchall()
return [Player(id=p['player_id'], first_name=p['first_name'], last_name=p['last_name']) for p in db_players]
def get_players_by_club(self, club_id) -> list:
script = """
SELECT p.first_name, p.last_name
FROM players AS p
WHERE p.club_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [club_id])
db_players = cur.fetchall()
return [Player(first_name=p['first_name'], last_name=p['last_name']) for p in db_players]
def get_players_free(self):
script = """
SELECT p.player_id, p.first_name, p.last_name
FROM players p
WHERE club_id is NULL"""
with self.get_cursor() as cur:
cur.execute(script)
db_players = cur.fetchall()
return [Player(first_name=p['first_name'], last_name=p['last_name'], id=p['player_id']) for p in db_players]
def add_player(self, player: Player) -> None:
insert_script = """
INSERT INTO players (first_name, last_name, date_of_birth, is_injured, position_id, height, club_id)
VALUES (%s, %s, %s, %s, %s, %s, %s)"""
insert_data = (player.first_name,
player.last_name,
player.date_of_birth,
player.is_injured,
player.position_id,
player.height,
player.club_id)
with self.get_cursor() as cur:
cur.execute(insert_script, insert_data)
self.conn.commit()
def update_player(self, player: Player) -> None:
update_script = """
UPDATE players
SET (first_name, last_name, date_of_birth, is_injured, position_id, height ,club_id) =
(%s, %s, %s, %s, %s, %s, %s)
WHERE player_id = %s;"""
update_data = (player.first_name, player.last_name,
player.date_of_birth, player.is_injured,
player.position_id, player.height, player.club_id, player.id)
with self.get_cursor() as cur:
cur.execute(update_script, update_data)
self.conn.commit()
def update_players_club(self, club_id: int, player_ids: list):
update_player = """
UPDATE players
SET club_id = %s
WHERE player_id IN %s"""
if player_ids:
with self.get_cursor() as cur:
cur.execute(update_player, (club_id, tuple(player_ids),))
self.conn.commit()
def delete_player(self, player_id: int) -> None:
delete_script = """DELETE FROM players WHERE player_id=%s;"""
with self.get_cursor()as cur:
cur.execute(delete_script, [player_id])
self.conn.commit()
# endregion
# region Club operations
def get_club(self, club_id: int) -> Club:
with self.get_cursor() as cur:
cur.execute('SELECT * FROM clubs WHERE club_id = {0}'.format(club_id))
c = cur.fetchone()
return Club(id=c['club_id'],
name=c['name'],
creation_date=c['creation_date'],
number_of_trophies=c['number_of_trophies'])
def get_clubs(self) -> list:
with self.get_cursor() as cur:
cur.execute('SELECT club_id as id, name FROM clubs')
db_clubs = cur.fetchall()
return [Club(id=c['id'], name=c['name']) for c in db_clubs]
def add_club(self, club: Club) -> int:
insert_script = """
INSERT INTO clubs (name, creation_date, number_of_trophies)
VALUES (%s, %s, %s) RETURNING club_id;"""
insert_data = (club.name, club.creation_date, club.number_of_trophies)
with self.get_cursor() as cur:
cur.execute(insert_script, insert_data)
new_id = cur.fetchone()[0]
self.conn.commit()
return new_id
def update_club(self, club: Club) -> None:
update_script = """
UPDATE clubs
SET (name, creation_date, number_of_trophies) = (%s, %s, %s)
WHERE club_id = %s;"""
update_data = (club.name, club.creation_date, club.number_of_trophies, club.id)
with self.get_cursor() as cur:
cur.execute(update_script, update_data)
self.conn.commit()
def delete_club(self, club_id: int) -> None:
delete_script = """DELETE FROM clubs WHERE club_id=%s;"""
with self.get_cursor() as cur:
cur.execute(delete_script, [club_id])
self.conn.commit()
def get_clubs_by_tournament(self, tournament_id: int):
script = """
SELECT c.club_id as id, c.name as name FROM clubs c
JOIN clubs_tournaments ct
ON ct.club_id = c.club_id
WHERE ct.tournament_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [tournament_id])
clubs = cur.fetchall()
self.conn.commit()
return [Club(id=c['id'], name=c['name']) for c in clubs]
def get_clubs_not_in_tournament(self, tournament_id: int):
script = """
SELECT c.club_id as id, c.name as name FROM clubs c
WHERE c.club_id NOT IN(
SELECT c.club_id FROM clubs c
JOIN clubs_tournaments ct
ON ct.club_id = c.club_id
WHERE ct.tournament_id = %s)"""
with self.get_cursor() as cur:
cur.execute(script, [tournament_id])
clubs = cur.fetchall()
return [Club(id=c['id'], name=c['name']) for c in clubs]
def add_clubs_to_tournament(self, tournament_id: int, club_ids: list):
data = [(cid, tournament_id) for cid in club_ids]
script = """INSERT INTO clubs_tournaments(club_id, tournament_id) VALUES %s"""
with self.get_cursor() as cur:
psycopg2.extras.execute_values(cur, script, data, template=None, page_size=100)
self.conn.commit()
# endregion
# region Tournament operations
def get_tournament(self, tournament_id: int) -> Tournament:
with self.get_cursor() as cur:
cur.execute('SELECT * FROM tournaments WHERE tournament_id = {0}'.format(tournament_id))
t = cur.fetchone()
return Tournament(id=t['tournament_id'], name=t['name'], description=t['description'])
def get_tournaments(self) -> list:
with self.get_cursor() as cur:
cur.execute('SELECT tournament_id as id, name FROM tournaments')
db_tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name']) for t in db_tournaments]
def delete_tournament(self, tournament_id: int) -> None:
delete_script = """DELETE FROM tournaments WHERE tournament_id=%s;"""
with self.get_cursor() as cur:
cur.execute(delete_script, [tournament_id])
self.conn.commit()
def add_tournament(self, tournament: Tournament) -> int:
insert_script = """
INSERT INTO tournaments (name, description)
VALUES (%s, %s) RETURNING tournament_id"""
insert_data = (tournament.name, tournament.description)
with self.get_cursor() as cur:
cur.execute(insert_script, insert_data)
new_id = cur.fetchone()[0]
self.conn.commit()
return new_id
def update_tournament(self, tournament: Tournament) -> None:
update_script = """
UPDATE tournaments
SET (name, description) = (%s, %s)
WHERE tournament_id = %s;"""
update_data = (tournament.name, tournament.description, tournament.id)
with self.get_cursor() as cur:
cur.execute(update_script, update_data)
self.conn.commit()
def get_tournaments_by_club(self, club_id: int):
script = """
SELECT t.tournament_id as id, t.name as name FROM tournaments t
JOIN clubs_tournaments ct
ON t.tournament_id = ct.tournament_id
WHERE ct.club_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [club_id])
tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name']) for t in tournaments]
# endregion
|
[
"faker.Faker",
"models.Player.Player",
"config.config.config",
"os.path.dirname",
"models.Tournament.Tournament",
"random.random",
"models.Club.Club",
"models.Position.Position",
"psycopg2.extras.execute_values",
"psycopg2.connect"
] |
[((1262, 1269), 'faker.Faker', 'Faker', ([], {}), '()\n', (1267, 1269), False, 'from faker import Faker\n'), ((2038, 2045), 'faker.Faker', 'Faker', ([], {}), '()\n', (2043, 2045), False, 'from faker import Faker\n'), ((2620, 2627), 'faker.Faker', 'Faker', ([], {}), '()\n', (2625, 2627), False, 'from faker import Faker\n'), ((5898, 6194), 'models.Player.Player', 'Player', ([], {'id': "db_player['player_id']", 'first_name': "db_player['first_name']", 'last_name': "db_player['last_name']", 'date_of_birth': "db_player['date_of_birth']", 'is_injured': "db_player['is_injured']", 'height': "db_player['height']", 'club_id': "db_player['club_id']", 'position_id': "db_player['position_id']"}), "(id=db_player['player_id'], first_name=db_player['first_name'],\n last_name=db_player['last_name'], date_of_birth=db_player[\n 'date_of_birth'], is_injured=db_player['is_injured'], height=db_player[\n 'height'], club_id=db_player['club_id'], position_id=db_player[\n 'position_id'])\n", (5904, 6194), False, 'from models.Player import Player\n'), ((9570, 9689), 'models.Club.Club', 'Club', ([], {'id': "c['club_id']", 'name': "c['name']", 'creation_date': "c['creation_date']", 'number_of_trophies': "c['number_of_trophies']"}), "(id=c['club_id'], name=c['name'], creation_date=c['creation_date'],\n number_of_trophies=c['number_of_trophies'])\n", (9574, 9689), False, 'from models.Club import Club\n'), ((12868, 12947), 'models.Tournament.Tournament', 'Tournament', ([], {'id': "t['tournament_id']", 'name': "t['name']", 'description': "t['description']"}), "(id=t['tournament_id'], name=t['name'], description=t['description'])\n", (12878, 12947), False, 'from models.Tournament import Tournament\n'), ((859, 867), 'config.config.config', 'config', ([], {}), '()\n', (865, 867), False, 'from config.config import config\n'), ((940, 966), 'psycopg2.connect', 'psycopg2.connect', ([], {}), '(**params)\n', (956, 966), False, 'import psycopg2\n'), ((3372, 3440), 'models.Tournament.Tournament', 'Tournament', ([], {'id': "t['id']", 'name': "t['name']", 'description': "t['description']"}), "(id=t['id'], name=t['name'], description=t['description'])\n", (3382, 3440), False, 'from models.Tournament import Tournament\n'), ((3973, 4041), 'models.Tournament.Tournament', 'Tournament', ([], {'id': "t['id']", 'name': "t['name']", 'description': "t['description']"}), "(id=t['id'], name=t['name'], description=t['description'])\n", (3983, 4041), False, 'from models.Tournament import Tournament\n'), ((5501, 5546), 'models.Position.Position', 'Position', ([], {'id': "p['position_id']", 'name': "p['name']"}), "(id=p['position_id'], name=p['name'])\n", (5509, 5546), False, 'from models.Position import Position\n'), ((6431, 6510), 'models.Player.Player', 'Player', ([], {'id': "p['player_id']", 'first_name': "p['first_name']", 'last_name': "p['last_name']"}), "(id=p['player_id'], first_name=p['first_name'], last_name=p['last_name'])\n", (6437, 6510), False, 'from models.Player import Player\n'), ((6879, 6939), 'models.Player.Player', 'Player', ([], {'first_name': "p['first_name']", 'last_name': "p['last_name']"}), "(first_name=p['first_name'], last_name=p['last_name'])\n", (6885, 6939), False, 'from models.Player import Player\n'), ((7265, 7344), 'models.Player.Player', 'Player', ([], {'first_name': "p['first_name']", 'last_name': "p['last_name']", 'id': "p['player_id']"}), "(first_name=p['first_name'], last_name=p['last_name'], id=p['player_id'])\n", (7271, 7344), False, 'from models.Player import Player\n'), ((9938, 9970), 'models.Club.Club', 
'Club', ([], {'id': "c['id']", 'name': "c['name']"}), "(id=c['id'], name=c['name'])\n", (9942, 9970), False, 'from models.Club import Club\n'), ((11582, 11614), 'models.Club.Club', 'Club', ([], {'id': "c['id']", 'name': "c['name']"}), "(id=c['id'], name=c['name'])\n", (11586, 11614), False, 'from models.Club import Club\n'), ((12134, 12166), 'models.Club.Club', 'Club', ([], {'id': "c['id']", 'name': "c['name']"}), "(id=c['id'], name=c['name'])\n", (12138, 12166), False, 'from models.Club import Club\n'), ((12455, 12534), 'psycopg2.extras.execute_values', 'psycopg2.extras.execute_values', (['cur', 'script', 'data'], {'template': 'None', 'page_size': '(100)'}), '(cur, script, data, template=None, page_size=100)\n', (12485, 12534), False, 'import psycopg2\n'), ((13164, 13202), 'models.Tournament.Tournament', 'Tournament', ([], {'id': "t['id']", 'name': "t['name']"}), "(id=t['id'], name=t['name'])\n", (13174, 13202), False, 'from models.Tournament import Tournament\n'), ((14813, 14851), 'models.Tournament.Tournament', 'Tournament', ([], {'id': "t['id']", 'name': "t['name']"}), "(id=t['id'], name=t['name'])\n", (14823, 14851), False, 'from models.Tournament import Tournament\n'), ((485, 510), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (500, 510), False, 'import os\n'), ((4995, 5059), 'models.Club.Club', 'Club', ([], {'name': "r['club']", 'number_of_trophies': "r['number_of_trophies']"}), "(name=r['club'], number_of_trophies=r['number_of_trophies'])\n", (4999, 5059), False, 'from models.Club import Club\n'), ((5078, 5163), 'models.Player.Player', 'Player', ([], {'first_name': "r['first_name']", 'last_name': "r['last_name']", 'height': "r['height']"}), "(first_name=r['first_name'], last_name=r['last_name'], height=r['height']\n )\n", (5084, 5163), False, 'from models.Player import Player\n'), ((5177, 5205), 'models.Position.Position', 'Position', ([], {'name': "r['position']"}), "(name=r['position'])\n", (5185, 5205), False, 'from models.Position import Position\n'), ((1833, 1841), 'random.random', 'random', ([], {}), '()\n', (1839, 1841), False, 'from random import random\n'), ((1896, 1904), 'random.random', 'random', ([], {}), '()\n', (1902, 1904), False, 'from random import random\n'), ((1965, 1973), 'random.random', 'random', ([], {}), '()\n', (1971, 1973), False, 'from random import random\n'), ((2540, 2548), 'random.random', 'random', ([], {}), '()\n', (2546, 2548), False, 'from random import random\n')]
|
# -*- coding: utf-8 -*-
import requests
import time
import math
import signal
def is_ok(url: str) -> bool:
"""
Returns True if the provided URL responds with a 2XX when fetched via
    an HTTP GET request.
"""
try:
resp = requests.get(url)
except:
return False
return True if math.floor(resp.status_code / 100) == 2 else False
def scan():
"""
Broadcasts the availability of the proxy's HTTP server once both the
API and UI are ready for traffic.
This script exists solely to ease confusion locally, as both Flask and
the HTTP server bundled with `create-react-app` output logs telling the
user about the ports they're bound to (even though they're inaccessible).
"""
print("")
print("⚓️ Ahoy!")
print("")
print(
"Your application is starting and will be available at " +
"http://localhost:8080 when it's ready."
)
print("")
# If someone tries to cancel the `docker-compose up` invocation, docker
# will send a SIGTERM to the program. We need to handle this and set a
# value that allows the loop to be broken.
term = False
def handle_interrupt(signal_number, stack_frame):
global term
term = True
signal.signal(signal.SIGTERM, handle_interrupt)
last_check = time.perf_counter()
is_app_live = False
while (is_app_live != True):
if term is True:
break
# We don't use `time.sleep()`, as that'd prevent us from being able
# to break the loop quickly in the event of a SIGTERM.
now = time.perf_counter()
if (now - last_check >= 5):
last_check = now
if not is_app_live:
is_app_live = is_ok("http://app:8000")
if is_app_live:
print("")
print("✨ Your local environment is ready:")
print("")
print(" http://localhost:8080")
print("")
print("⛵️ Smooth sailing!")
print("")
if __name__ == "__main__":
scan()
|
[
"signal.signal",
"math.floor",
"time.perf_counter",
"requests.get"
] |
[((1250, 1297), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'handle_interrupt'], {}), '(signal.SIGTERM, handle_interrupt)\n', (1263, 1297), False, 'import signal\n'), ((1316, 1335), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1333, 1335), False, 'import time\n'), ((247, 264), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (259, 264), False, 'import requests\n'), ((1589, 1608), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1606, 1608), False, 'import time\n'), ((317, 351), 'math.floor', 'math.floor', (['(resp.status_code / 100)'], {}), '(resp.status_code / 100)\n', (327, 351), False, 'import math\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import subprocess
#Creation of the Population
datos = open("Datos/Poblacion.txt","w")
datos.close()
datos = open("Datos/Estados.txt","w")
datos.close()
#generation of coordinates
contador = 0
for x in range(1,2000):
longitud = np.random.uniform(-108,-85,1)
latitud = np.random.uniform(14.5,25,1)
lon = longitud[0]
lat = latitud[0]
    #setting limits
if lat < 16.3 and lon < -92.38:
pass
elif lat < 25 and lat > 18.119 and lon < -90.4 and lon > -97 :
pass
elif lon > -88 and lat > 16:
pass
elif lat > 24 and lon > -91:
pass
elif lat < 23.7 and lon < -105.5:
pass
elif lat < 18.27 and lon < -101:
pass
elif lat > 20.6 and lon > -98:
pass
elif lat < 24.39 and lon < -106.7:
pass
elif lat < 20.4 and lon < -105.3:
pass
elif lat < 18 and lon > -91:
pass
elif lat < 17.399 and lon < -98:
pass
elif lat < 19.7 and lon < -103.6:
pass
else:
contador = contador + 1
datos = open("Datos/Poblacion.txt","a")
datos.write(str(lat)+","
+str(lon)+"\n")
datos.close()
porcentajes = open("Datos/Datos.txt","r").read()
unidad = 0.7
inf = (float(porcentajes) * 0.7)/float(100) #red 2
sano = unidad - inf #yellow 0
#generation of states
s = 0.3 #green 1
r = 0.0 #blue 3
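#assign each person one of the 4 states (0-3) with probabilities [sano, s, inf, r]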
v = np.random.choice(4, contador, p=[sano, s, inf, r])
for i in v:
data = open("Datos/Estados.txt","a")
data.write(str(i)+"\n")
data.close()
mapa = subprocess.Popen([sys.executable, 'src/mapa.py'])
|
[
"numpy.random.uniform",
"subprocess.Popen",
"numpy.random.choice"
] |
[((1379, 1429), 'numpy.random.choice', 'np.random.choice', (['(4)', 'contador'], {'p': '[sano, s, inf, r]'}), '(4, contador, p=[sano, s, inf, r])\n', (1395, 1429), True, 'import numpy as np\n'), ((1529, 1578), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'src/mapa.py']"], {}), "([sys.executable, 'src/mapa.py'])\n", (1545, 1578), False, 'import subprocess\n'), ((390, 421), 'numpy.random.uniform', 'np.random.uniform', (['(-108)', '(-85)', '(1)'], {}), '(-108, -85, 1)\n', (407, 421), True, 'import numpy as np\n'), ((431, 461), 'numpy.random.uniform', 'np.random.uniform', (['(14.5)', '(25)', '(1)'], {}), '(14.5, 25, 1)\n', (448, 461), True, 'import numpy as np\n')]
|
# This file is compatible with both Python 2 and 3
import base64
import cv2
import json
import numpy as np
from flask import Response
import time
import functools
from collections import deque
class Stream(deque):
"""
    A Stream stores an output sequence of data. It inherits from deque.
Stream contains oldest-newest data from left to right.
Stream has a "capacity" -- if the stream is full, it will drop the oldest data.
vizstream will monitor the stream and will send data to the browser whenever the
stream is updated and its timestamp changes.
"""
def __init__(self, capacity=1, fps=10):
"""
Args:
capacity: (int) maximum capacity of stream.
"""
self.capacity = capacity
self.fps = fps
self.timestamp = 0
super(Stream, self).__init__(maxlen=self.capacity)
def publish(self, **kwargs):
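        # tag the payload with a monotonically increasing timestamp so vizstream can detect new data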
item = dict(data=kwargs, timestamp=self.timestamp)
self.timestamp += 1
self.append(item)
def reset(self):
self.clear()
self.timestamp = 0
def vizstream(app, stream, astype):
if astype == 'scene_cloud':
url = '/api/stream_scene_cloud'
data2msg = data2msg_scene_cloud
elif astype == 'lc_curtain':
url = '/api/stream_lc_curtain'
data2msg = data2msg_lc_curtain
# elif astype == 'camera_image':
# url = '/api/stream_camera_image'
# data2msg = data2msg_camera_image
# elif astype == 'lidar_cloud':
# url = '/api/stream_lidar_cloud'
# data2msg = data2msg_lidar_cloud
# elif astype == 'dt_boxes':
# url = '/api/stream_dt_boxes'
# data2msg = data2msg_dt_boxes
# elif astype == 'entropy_map':
# url = '/api/stream_entropy_map'
# data2msg = data2msg_entropy_map
# elif astype == 'arrows':
# url = '/api/stream_arrows'
# data2msg = data2msg_arrows
else:
raise Exception("astype={} not valid".format(astype))
def generator():
sent_timestamp = None
while True:
if len(stream) == 0:
sent_timestamp = None
elif sent_timestamp != stream[-1]["timestamp"]:
sent_timestamp = stream[-1]["timestamp"]
data = stream[-1]["data"]
msg = data2msg(**data)
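                # SSE (text/event-stream) framing: "data:<payload>" terminated by a blank line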
yield "data:{}\n\n".format(msg)
time.sleep(1.0 / stream.fps)
@app.route(url, methods=['GET', 'POST'])
@functools.wraps(data2msg)
def route_fn():
return Response(generator(), mimetype="text/event-stream")
########################################################################################################################
# region data2msg functions
########################################################################################################################
def data2msg_scene_cloud(scene_points, se_design_pts=None, downsample=False, int16_factor=100):
"""
Args:
scene_points (np.ndarray, dtype=float32, shape=(N, 6)): scene points
se_design_points (Optional(np.ndarray, dtype=float32, shape=(C, 2))): design points of the safety envelope
"""
# the next line downsamples the scene points. it selects one from every three points.
if downsample:
scene_points = scene_points[::3, :]
# convert to int16
scene_points = scene_points * int16_factor
scene_points = scene_points.astype(np.int16)
scene_pc_str = base64.b64encode(scene_points.tobytes()).decode("utf-8")
send_dict = dict(scene_pc_str=scene_pc_str)
if se_design_pts is not None:
# convert to int16
se_design_pts = se_design_pts * int16_factor
se_design_pts = se_design_pts.astype(np.int16)
se_pc_str = base64.b64encode(se_design_pts.tobytes()).decode("utf-8")
send_dict["se_pc_str"] = se_pc_str
json_str = json.dumps(send_dict)
return json_str
# def data2msg_camera_image(data: Frame):
# image_str = data.cam["image_str"]
# image_dtype = data.cam["datatype"]
# image_b64 = base64.b64encode(image_str).decode("utf-8")
# image_b64 = f"data:image/{image_dtype};base64,{image_b64}"
# return image_b64
# def data2msg_lidar_cloud(data, int16_factor=100):
# points = data # (N, 3)
# # convert to int16
# points = points * int16_factor
# points = points.astype(np.int16)
# pc_str = base64.b64encode(points.tobytes()).decode("utf-8")
# return pc_str
def data2msg_lc_curtain(lc_image, lc_cloud, score=None, int16_factor=100):
"""
Args:
lc_image: light curtain image.
- (np.ndarray, dtype=float32, shape=(H, C, 4)).
- Axis 2 corresponds to (x, y, z, i):
- x : x in cam frame.
- y : y in cam frame.
- z : z in cam frame.
- i : intensity of LC cloud, lying in [0, 255].
lc_cloud: light curtain point cloud.
- (np.ndarray, dtype=float32, shape=(N, 4)).
- Axis 2 corresponds to (x, y, z, i):
- x : x in cam frame.
- y : y in cam frame.
- z : z in cam frame.
- i : intensity of LC cloud, lying in [0, 1].
score (Optional(float)): score to be displayed in kittiviewer
"""
# boundary
lc_image = lc_image[:, :, :3] # (H, C, 3)
ys = lc_image[:, :, 1] # (H, C)
ys[np.isnan(ys)] = 0 # replacing NaNs with zeros shouldn't affect the columnwise min or max of y
top_inds = np.argmin(ys, axis=0) # (C,)
bot_inds = np.argmax(ys, axis=0) # (C,)
top_xyz = lc_image[top_inds, np.arange(len(top_inds)), :] # (C, 3)
bot_xyz = lc_image[bot_inds, np.arange(len(bot_inds)), :] # (C, 3)
boundary = np.stack([top_xyz, bot_xyz], axis=1) # (C, 2, 3)
mask = np.isfinite(boundary).all(axis=(1, 2)) # (C,)
boundary = boundary[mask] # (C', 2, 3)
# intersection points
isect_pts = lc_cloud[lc_cloud[:, 3] > 0.05] # (N', 4)
# convert to int16
boundary = (boundary * int16_factor).astype(np.int16)
isect_pts = (isect_pts * int16_factor).astype(np.int16)
boundary_str = base64.b64encode(boundary.tobytes()).decode("utf-8")
isect_pts_str = base64.b64encode(isect_pts.tobytes()).decode("utf-8")
send_dict = dict(boundary=boundary_str, isect_pts=isect_pts_str, score=score)
json_str = json.dumps(send_dict)
return json_str
# def data2msg_dt_boxes(data):
# dt_boxes = data["detections"]
# json_str = json.dumps(dt_boxes)
# return json_str
# def data2msg_entropy_map(data):
# confidence_map = data["confidence_map"]
# entropy_heatmap = _create_entropy_heatmap(confidence_map)
# image_str = cv2.imencode('.png', entropy_heatmap)[1].tostring()
# image_b64 = base64.b64encode(image_str).decode("utf-8")
# image_b64 = f"data:image/png;base64,{image_b64}"
# return image_b64
# def data2msg_arrows(data):
# tails = list([float(e) for e in data["tails"].ravel()])
# heads = list([float(e) for e in data["heads"].ravel()])
# arrows = dict(tails=tails, heads=heads)
# json_str = json.dumps(arrows)
# return json_str
# endregion
########################################################################################################################
# region Helper functions
########################################################################################################################
# def _create_confidence_heatmap(confidence_map):
# # Take the mean of confidences for the 0-degrees and 90-degrees anchors
# conf_scores = confidence_map[:, :, 2:] # (Y, X, K)
# conf_scores = conf_scores.mean(axis=2) # (Y, X)
# # Rescale between 0 and 1.
# # conf_scores = conf_scores - conf_scores.min()
# # conf_scores = conf_scores / conf_scores.max()
# heatmap = cv2.applyColorMap((conf_scores * 255).astype(np.uint8), cv2.COLORMAP_HOT)
# return heatmap
# def _create_entropy_heatmap(confidence_map):
# p = confidence_map[:, :, 2:] # (Y, X, K)
# p = p.clip(1e-5, 1-1e-5) # (Y, X, K)
# entropy = -p * np.log2(p) - (1-p) * np.log2(1-p) # (Y, X, K)
# entropy = entropy.mean(axis=2) # (Y, X)
# heatmap = cv2.applyColorMap((entropy * 255).astype(np.uint8), cv2.COLORMAP_HOT)
# return heatmap
# endregion
########################################################################################################################
|
[
"numpy.stack",
"numpy.argmax",
"numpy.isfinite",
"numpy.argmin",
"json.dumps",
"numpy.isnan",
"time.sleep",
"functools.wraps"
] |
[((2501, 2526), 'functools.wraps', 'functools.wraps', (['data2msg'], {}), '(data2msg)\n', (2516, 2526), False, 'import functools\n'), ((3907, 3928), 'json.dumps', 'json.dumps', (['send_dict'], {}), '(send_dict)\n', (3917, 3928), False, 'import json\n'), ((5582, 5603), 'numpy.argmin', 'np.argmin', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (5591, 5603), True, 'import numpy as np\n'), ((5627, 5648), 'numpy.argmax', 'np.argmax', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (5636, 5648), True, 'import numpy as np\n'), ((5816, 5852), 'numpy.stack', 'np.stack', (['[top_xyz, bot_xyz]'], {'axis': '(1)'}), '([top_xyz, bot_xyz], axis=1)\n', (5824, 5852), True, 'import numpy as np\n'), ((6440, 6461), 'json.dumps', 'json.dumps', (['send_dict'], {}), '(send_dict)\n', (6450, 6461), False, 'import json\n'), ((5472, 5484), 'numpy.isnan', 'np.isnan', (['ys'], {}), '(ys)\n', (5480, 5484), True, 'import numpy as np\n'), ((2421, 2449), 'time.sleep', 'time.sleep', (['(1.0 / stream.fps)'], {}), '(1.0 / stream.fps)\n', (2431, 2449), False, 'import time\n'), ((5877, 5898), 'numpy.isfinite', 'np.isfinite', (['boundary'], {}), '(boundary)\n', (5888, 5898), True, 'import numpy as np\n')]
|
import os
import json
local_path = os.path.dirname(__file__)
def load_config_file(path):
file = open(os.path.join(local_path, os.pardir, path))
return json.load(file)
def load_loc():
return load_config_file('config/loc.json')
def load_responses_config():
return load_config_file('config/responses.json')
def load_bot_config():
return load_config_file('config/config.json')
def load_emoji_config():
file = open(os.path.join(local_path, os.pardir, 'config/emoji.json'), encoding="utf8")
return json.load(file)
def load_gif_config():
return load_config_file('config/gif.json')
def load_tokens():
return load_config_file('config/token.json')
|
[
"os.path.dirname",
"json.load",
"os.path.join"
] |
[((36, 61), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (51, 61), False, 'import os\n'), ((162, 177), 'json.load', 'json.load', (['file'], {}), '(file)\n', (171, 177), False, 'import json\n'), ((531, 546), 'json.load', 'json.load', (['file'], {}), '(file)\n', (540, 546), False, 'import json\n'), ((108, 149), 'os.path.join', 'os.path.join', (['local_path', 'os.pardir', 'path'], {}), '(local_path, os.pardir, path)\n', (120, 149), False, 'import os\n'), ((445, 501), 'os.path.join', 'os.path.join', (['local_path', 'os.pardir', '"""config/emoji.json"""'], {}), "(local_path, os.pardir, 'config/emoji.json')\n", (457, 501), False, 'import os\n')]
|
from __future__ import unicode_literals, absolute_import
from django.contrib.auth import get_user_model
from django.test import TestCase
from django_functest import FuncWebTestMixin, ShortcutLoginMixin
User = get_user_model()
class WebTestBase(FuncWebTestMixin, ShortcutLoginMixin, TestCase):
@classmethod
def setUpTestData(cls):
super(WebTestBase, cls).setUpTestData()
cls.USER = User.objects.create_user(
username="user", password="<PASSWORD>", email="<EMAIL>"
)
|
[
"django.contrib.auth.get_user_model"
] |
[((212, 228), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (226, 228), False, 'from django.contrib.auth import get_user_model\n')]
|
import unittest
from typing import Sequence, Iterable, cast, Mapping
import tempfile
import os
import numpy as np
import joblib
from hypothesis import given, note
from hypothesis import settings, strategies as st
from scilk.corpora import genia
from scilk.util import intervals
from scilk.collections import _collections
import scilk
MAX_TESTS = 1000
# strategies
texts = st.text(st.characters(min_codepoint=32, max_codepoint=255), 0, 500, 1000)
def loader_caller(collection: _collections.Collection, data=None):
def caller(value: str):
return collection.translate(value)
return caller
def loader_translate(collection: _collections.Collection, data: dict):
mapping = joblib.load(data['mapping'])
def translate(value: str):
return mapping.get(value)
return translate
# test cases
class TestText(unittest.TestCase):
@staticmethod
def unparse(txt, intervals_: Sequence[intervals.Interval]):
if not len(intervals_):
return ""
codes = np.repeat([ord(" ")], intervals_[-1].stop)
for iv in intervals_:
token = intervals.extract(txt, [iv])[0]
codes[iv.start:iv.stop] = list(map(ord, token))
return "".join(map(chr, codes))
# @given(texts)
# @settings(max_examples=MAX_TESTS)
# def test_parse_text(self, txt):
# parsed = text.tointervals(text.fine_tokeniser, txt)
# mod_text = re.sub("\s", " ", txt)
# self.assertEqual(self.unparse(txt, parsed), mod_text.rstrip())
class TestGenia(unittest.TestCase):
@given(st.lists(st.text()))
@settings(max_examples=MAX_TESTS)
def test_text_boundaries(self, texts: list):
"""
Test of text_boundaries() function.
:return:
"""
boundaries = genia._segment_borders(texts)
note(boundaries)
self.assertTrue(all([boundaries[i][1] == boundaries[i + 1][0] for i in
range(len(boundaries) - 1)]))
self.assertTrue(all([boundaries[i][0] <= boundaries[i][1] for i in
range(len(boundaries) - 1)]))
if boundaries:
self.assertTrue(boundaries[0][0] == 0)
class TestCollection(unittest.TestCase):
def test_collection(self):
with tempfile.TemporaryDirectory() as dirpath:
scilk.SCILK_ROOT = dirpath
mapping = dict(test='OK')
mapping_path = os.path.join(dirpath, 'mapping.joblib')
joblib.dump(mapping, mapping_path)
collection = _collections.Collection()
collection.add('translate', loader_translate, dict(mapping=mapping_path))
collection.add('caller', loader_caller)
self.assertAlmostEqual(collection.caller('test'), 'OK')
collection.save(name='test')
collection = _collections.Collection.load('test')
self.assertAlmostEqual(collection.caller('test'), 'OK')
self.assertEquals({'translate', 'caller'}, set(collection.entries))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"scilk.collections._collections.Collection.load",
"tempfile.TemporaryDirectory",
"hypothesis.strategies.characters",
"joblib.dump",
"hypothesis.note",
"scilk.collections._collections.Collection",
"hypothesis.settings",
"scilk.corpora.genia._segment_borders",
"hypothesis.strategies.text",
"scilk.util.intervals.extract",
"joblib.load",
"os.path.join"
] |
[((386, 436), 'hypothesis.strategies.characters', 'st.characters', ([], {'min_codepoint': '(32)', 'max_codepoint': '(255)'}), '(min_codepoint=32, max_codepoint=255)\n', (399, 436), True, 'from hypothesis import settings, strategies as st\n'), ((699, 727), 'joblib.load', 'joblib.load', (["data['mapping']"], {}), "(data['mapping'])\n", (710, 727), False, 'import joblib\n'), ((1599, 1631), 'hypothesis.settings', 'settings', ([], {'max_examples': 'MAX_TESTS'}), '(max_examples=MAX_TESTS)\n', (1607, 1631), False, 'from hypothesis import settings, strategies as st\n'), ((3049, 3064), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3062, 3064), False, 'import unittest\n'), ((1787, 1816), 'scilk.corpora.genia._segment_borders', 'genia._segment_borders', (['texts'], {}), '(texts)\n', (1809, 1816), False, 'from scilk.corpora import genia\n'), ((1825, 1841), 'hypothesis.note', 'note', (['boundaries'], {}), '(boundaries)\n', (1829, 1841), False, 'from hypothesis import given, note\n'), ((1582, 1591), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (1589, 1591), True, 'from hypothesis import settings, strategies as st\n'), ((2275, 2304), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2302, 2304), False, 'import tempfile\n'), ((2421, 2460), 'os.path.join', 'os.path.join', (['dirpath', '"""mapping.joblib"""'], {}), "(dirpath, 'mapping.joblib')\n", (2433, 2460), False, 'import os\n'), ((2473, 2507), 'joblib.dump', 'joblib.dump', (['mapping', 'mapping_path'], {}), '(mapping, mapping_path)\n', (2484, 2507), False, 'import joblib\n'), ((2533, 2558), 'scilk.collections._collections.Collection', '_collections.Collection', ([], {}), '()\n', (2556, 2558), False, 'from scilk.collections import _collections\n'), ((2831, 2867), 'scilk.collections._collections.Collection.load', '_collections.Collection.load', (['"""test"""'], {}), "('test')\n", (2859, 2867), False, 'from scilk.collections import _collections\n'), ((1113, 1141), 'scilk.util.intervals.extract', 'intervals.extract', (['txt', '[iv]'], {}), '(txt, [iv])\n', (1130, 1141), False, 'from scilk.util import intervals\n')]
|
import os
import logging
import argparse
import sys
from .controllers.hot_lobs import HotLobMonitor
from .controllers.cold_lobs import ColdLobMonitor
if __name__ == "__main__":
# Check for config file
if "PROJECT_LOB_CONFIG" not in os.environ:
print("PROJECT_LOB_CONFIG must be an environment variable set to "
"the path of a JSON config file.")
sys.exit()
config_path = os.environ["PROJECT_LOB_CONFIG"]
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-hm", "--hot-monitor", help="Run the hot lob monitor",
action="store_true")
parser.add_argument("-cm", "--cold-monitor", help="Run the cold lob monitor",
action="store_true")
args = parser.parse_args()
if not args.hot_monitor and not args.cold_monitor:
print("Must specify a program to run, hot monitor or cold"
" controller!")
sys.exit()
if args.hot_monitor and args.cold_monitor:
print("Cannot run both hot and cold monitoring programs."
" Please specify one")
sys.exit()
# Setup logging
logger = logging.getLogger('project_lob')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('lobster_log.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# Run monitor
if args.hot_monitor:
logger.info("Creating hot lob monitor!")
monitor = HotLobMonitor(config_path)
if args.cold_monitor:
logger.info("Creating cold lob monitor!")
monitor = ColdLobMonitor(config_path)
monitor.run()
|
[
"argparse.ArgumentParser",
"logging.FileHandler",
"logging.StreamHandler",
"logging.Formatter",
"sys.exit",
"logging.getLogger"
] |
[((484, 509), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (507, 509), False, 'import argparse\n'), ((1169, 1201), 'logging.getLogger', 'logging.getLogger', (['"""project_lob"""'], {}), "('project_lob')\n", (1186, 1201), False, 'import logging\n'), ((1246, 1284), 'logging.FileHandler', 'logging.FileHandler', (['"""lobster_log.log"""'], {}), "('lobster_log.log')\n", (1265, 1284), False, 'import logging\n'), ((1325, 1348), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1346, 1348), False, 'import logging\n'), ((1396, 1468), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""'], {}), "('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n", (1413, 1468), False, 'import logging\n'), ((386, 396), 'sys.exit', 'sys.exit', ([], {}), '()\n', (394, 396), False, 'import sys\n'), ((955, 965), 'sys.exit', 'sys.exit', ([], {}), '()\n', (963, 965), False, 'import sys\n'), ((1124, 1134), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1132, 1134), False, 'import sys\n')]
|
import math
from math import pi,sqrt,radians,degrees
import sys
g=9.8
G=6.67e-11
e=1.6021917e-19
ke=8.9875e9
me=9.1095e-31
mp=1.67261e-27
mn=1.674929e-27
blocked=["exec","sys","eval","PROCESS","CMD","RESULT","block","import","math","from",]
def sin(deg):
return math.sin(radians(deg))
def cos(deg):
return math.cos(radians(deg))
def tan(deg):
return math.tan(radians(deg))
def asin(n):
return degrees(math.asin(n))
def acos(n):
return degrees(math.acos(n))
def atan(n):
return degrees(math.atan(n))
def sq(n):
return n*n
ans=0
def PROCESS(CMD):
global ans
for block in blocked:
if block in CMD:
print('The keyword "'+block+'" has been blocked for security reasons.')
return
RESULT=eval(CMD)
if type(RESULT).__name__=="float" and abs(RESULT)>=1e-3 and abs(RESULT)<=1e9:
print("%.6f"%RESULT)
elif type(RESULT).__name__=="float" and abs(RESULT)>=1e9:
print("%e"%RESULT)
else:
print(RESULT)
ans=RESULT
print("Welcome to YAXO Physics Calculator!\n")
while True:
try:
PROCESS(input(">>> "))
except KeyboardInterrupt:
break
except:
print(sys.exc_info()[0].__name__)
input("\nThank you for using YAXO Physics Calculator! (Press Return to exit)")
|
[
"math.atan",
"math.asin",
"math.radians",
"math.acos",
"sys.exc_info"
] |
[((278, 290), 'math.radians', 'radians', (['deg'], {}), '(deg)\n', (285, 290), False, 'from math import pi, sqrt, radians, degrees\n'), ((326, 338), 'math.radians', 'radians', (['deg'], {}), '(deg)\n', (333, 338), False, 'from math import pi, sqrt, radians, degrees\n'), ((374, 386), 'math.radians', 'radians', (['deg'], {}), '(deg)\n', (381, 386), False, 'from math import pi, sqrt, radians, degrees\n'), ((420, 432), 'math.asin', 'math.asin', (['n'], {}), '(n)\n', (429, 432), False, 'import math\n'), ((466, 478), 'math.acos', 'math.acos', (['n'], {}), '(n)\n', (475, 478), False, 'import math\n'), ((512, 524), 'math.atan', 'math.atan', (['n'], {}), '(n)\n', (521, 524), False, 'import math\n'), ((1208, 1222), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1220, 1222), False, 'import sys\n')]
|
from autokeras.generator import DefaultClassifierGenerator
from autokeras.utils import *
from tests.common import get_processed_data
def test_model_trainer():
model = DefaultClassifierGenerator(3, (28, 28, 3)).generate().produce_model()
train_data, test_data = get_processed_data()
ModelTrainer(model, train_data, test_data, False).train_model(max_iter_num=3)
|
[
"autokeras.generator.DefaultClassifierGenerator",
"tests.common.get_processed_data"
] |
[((272, 292), 'tests.common.get_processed_data', 'get_processed_data', ([], {}), '()\n', (290, 292), False, 'from tests.common import get_processed_data\n'), ((174, 216), 'autokeras.generator.DefaultClassifierGenerator', 'DefaultClassifierGenerator', (['(3)', '(28, 28, 3)'], {}), '(3, (28, 28, 3))\n', (200, 216), False, 'from autokeras.generator import DefaultClassifierGenerator\n')]
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Create new point. Synchonize with layer and file
-------------------
begin : 2018-07-11
git sha : $Format:%H$
author : <NAME>
***************************************************************************/
"""
from qgis.gui import QgsMapTool
from qgis.core import QgsMapLayer
from PyQt4.QtGui import QCursor
from PyQt4.QtCore import Qt
import util_table
import util_layer
import util_io
class FeatureToolAdd(QgsMapTool):
def __init__(self, canvas):
super(QgsMapTool, self).__init__(canvas)
self.canvas = canvas
self.cursor = QCursor(Qt.CrossCursor)
def activate(self):
self.canvas.setCursor(self.cursor)
def setTable(self, table):
self.table = table
def setLayer(self, layer):
self.layer = layer
def setUrl(self, url):
self.url = url
def canvasReleaseEvent(self, mouseEvent):
"""
Each time the mouse is clicked on the map canvas, perform
the following tasks:
...
"""
layerGrille = None
for layer in self.canvas.layers():
if layer.type() == QgsMapLayer.VectorLayer:
if (layer.name() == util_layer.CONST_NOM_LAYER_GRILLE):
layerGrille = layer
p = mouseEvent.pos()
# Determine the location of the click in real-world coords
layerPoint = self.toLayerCoordinates( layerGrille, p )
# =============================================================================
        # Add to the layer, table and file
        # Add the row to the table
self.table = util_table.addLigne(self.table, layerPoint.x(), layerPoint.y())
        # Save to the file
util_io.addLigne(self.url, layerPoint.x(), layerPoint.y())
        # Synchronize with the layer
self.layer = util_layer.addPointLayer(self.layer, layerPoint.x(), layerPoint.y())
# ====================================================================
        # A quick refresh
# QgsMapLayerRegistry.instance().addMapLayer(self.layer)
self.canvas.refresh();
|
[
"PyQt4.QtGui.QCursor"
] |
[((769, 792), 'PyQt4.QtGui.QCursor', 'QCursor', (['Qt.CrossCursor'], {}), '(Qt.CrossCursor)\n', (776, 792), False, 'from PyQt4.QtGui import QCursor\n')]
|
import unittest
from credential import Credential
class TestCredential(unittest.TestCase):
"""
Test class that defines test cases for the credential class behavioursself.
Args:
unittest.TestCase:TestCase class that helps in creating test cases
"""
def setUp(self):
"""
Set up method to run before each test casesself.
"""
self.new_credential = Credential("Twitter","Davidngatia","4321")
def tearDown(self):
"""
tearDown method that does clean up after each test case has run.
"""
Credential.credential_list = []
def test_init(self):
"""
test_init test case to test if the object is initialized properly
"""
self.assertEqual(self.new_credential.account_type,"Twitter")
self.assertEqual(self.new_credential.user_name,"Davidngatia")
self.assertEqual(self.new_credential.password,"<PASSWORD>")
def test_generate_password(self):
"""
method that generates a password
"""
self.assertEqual(Credential.password(),Credential.password)
def test_save_credential(self):
"""
test save credential test case to test if the contact object is saved into the credential_list
"""
self.new_credential.save_credential()
self.assertEqual(len(Credential.credential_list),1)
def test_save_multiple_credentials(self):
"""
test_save_multiple_credentials to check if it can save multiple credentials objects to our credential_list
"""
self.new_credential.save_credential()
test_credential=Credential("Facebook","Davidngatia","09876")
test_credential.save_credential()
self.assertEqual(len(Credential.credential_list),2)
def test_display_credentials(self):
"""
method that returns a list of all credentials save_credential
"""
self.assertEqual(Credential.display_credentials(),Credential.credential_list)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"credential.Credential.password",
"credential.Credential",
"credential.Credential.display_credentials"
] |
[((2048, 2063), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2061, 2063), False, 'import unittest\n'), ((407, 451), 'credential.Credential', 'Credential', (['"""Twitter"""', '"""Davidngatia"""', '"""4321"""'], {}), "('Twitter', 'Davidngatia', '4321')\n", (417, 451), False, 'from credential import Credential\n'), ((1643, 1689), 'credential.Credential', 'Credential', (['"""Facebook"""', '"""Davidngatia"""', '"""09876"""'], {}), "('Facebook', 'Davidngatia', '09876')\n", (1653, 1689), False, 'from credential import Credential\n'), ((1073, 1094), 'credential.Credential.password', 'Credential.password', ([], {}), '()\n', (1092, 1094), False, 'from credential import Credential\n'), ((1950, 1982), 'credential.Credential.display_credentials', 'Credential.display_credentials', ([], {}), '()\n', (1980, 1982), False, 'from credential import Credential\n')]
|
import platform
import sys
from setuptools import setup
if platform.python_implementation() == 'CPython':
try:
import wheel.bdist_wheel
except ImportError:
cmdclass = {}
else:
class bdist_wheel(wheel.bdist_wheel.bdist_wheel):
def finalize_options(self) -> None:
self.py_limited_api = f'cp3{sys.version_info[1]}'
super().finalize_options()
cmdclass = {'bdist_wheel': bdist_wheel}
else:
cmdclass = {}
setup(cffi_modules=['editdistance_s_build.py:ffibuilder'], cmdclass=cmdclass)
|
[
"platform.python_implementation",
"setuptools.setup"
] |
[((495, 572), 'setuptools.setup', 'setup', ([], {'cffi_modules': "['editdistance_s_build.py:ffibuilder']", 'cmdclass': 'cmdclass'}), "(cffi_modules=['editdistance_s_build.py:ffibuilder'], cmdclass=cmdclass)\n", (500, 572), False, 'from setuptools import setup\n'), ((61, 93), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (91, 93), False, 'import platform\n')]
|
import smtplib
from email.message import EmailMessage
def send_mail(mail: str, name: str):
user = ''
password = ''
text="Hello " + name + ", \n your account has been created succesfully"
msg = EmailMessage()
msg.set_content(text)
msg['Subject'] = "Confirmation"
msg['From'] = ""
msg['To'] = mail
s = smtplib.SMTP("", 465)
s.ehlo()
s.starttls()
s.login(user, password)
s.send_message(msg)
s.close()
|
[
"email.message.EmailMessage",
"smtplib.SMTP"
] |
[((211, 225), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (223, 225), False, 'from email.message import EmailMessage\n'), ((338, 359), 'smtplib.SMTP', 'smtplib.SMTP', (['""""""', '(465)'], {}), "('', 465)\n", (350, 359), False, 'import smtplib\n')]
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.common import Contributor
from cybox.test import EntityTestCase
class TestContributor(EntityTestCase, unittest.TestCase):
klass = Contributor
_full_dict = {
'role': "Lead Tester",
'name': "<NAME>",
'email': "<EMAIL>",
'phone': "(123) 456-7890",
'organization': "Doc Brown Enterprises(tm)",
'date': {
'start_date': "1955-11-05",
'end_date': "1985-11-05",
},
'contribution_location': "Hill Valley, CA",
}
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main"
] |
[((672, 687), 'unittest.main', 'unittest.main', ([], {}), '()\n', (685, 687), False, 'import unittest\n')]
|
"""
This script prints out the commands to delete all files generated by a pipeline,
downstream of a specified stage.
If one stage was wrong, and you need to re-run everything it affected, this script
will print out the commands to delete the relevant files so that re-running the pipeline
with resume=True will re-run the correct stages.
"""
import sys
sys.path.append('.')
import ceci
import txpipe
import yaml
import collections
import os
# start from a config file and a stage to delete
config = yaml.safe_load(open(sys.argv[1]))
stage_to_delete = sys.argv[2]
# get the stages we need
stage_names = [s['name'] for s in config['stages']]
pipeline = ceci.Pipeline(config['stages'], None)
stages = [ceci.PipelineStage.get_stage(stage_name) for stage_name in stage_names]
# build the mapping tag => stages depending on that tag
dependencies = collections.defaultdict(list)
for stage in stages:
for tag in stage.input_tags():
dependencies[tag].append(stage)
# initialize with deletng one stage and the tags it makes
tags_to_delete = ceci.PipelineStage.get_stage(stage_to_delete).output_tags()
stages_to_delete = {stage}
# loop through nstage times (the maximum it could be)
for i in range(len(stage_names)):
# take all tags we currently know we have to delete
for tag in tags_to_delete[:]:
# find out which stages to clear because they need
# this tag which we are deleting
deps = set(dependencies[tag])
for s in stages:
if s in deps:
# if we need to delete this stage,
# add its outputs to the tags to delete
tags_to_delete += s.output_tags()
# and it to the stages to delete
stages_to_delete.add(s)
tags_to_delete = list(set(tags_to_delete))
# now at the end we delete all tags output by stage to delete
for s in stages_to_delete:
for f in pipeline.find_outputs(s, config).values():
print(f"rm -f {f}")
|
[
"sys.path.append",
"collections.defaultdict",
"ceci.Pipeline",
"ceci.PipelineStage.get_stage"
] |
[((355, 375), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (370, 375), False, 'import sys\n'), ((655, 692), 'ceci.Pipeline', 'ceci.Pipeline', (["config['stages']", 'None'], {}), "(config['stages'], None)\n", (668, 692), False, 'import ceci\n'), ((847, 876), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (870, 876), False, 'import collections\n'), ((703, 743), 'ceci.PipelineStage.get_stage', 'ceci.PipelineStage.get_stage', (['stage_name'], {}), '(stage_name)\n', (731, 743), False, 'import ceci\n'), ((1049, 1094), 'ceci.PipelineStage.get_stage', 'ceci.PipelineStage.get_stage', (['stage_to_delete'], {}), '(stage_to_delete)\n', (1077, 1094), False, 'import ceci\n')]
|
from jaraco.windows import security
def test_get_security_attributes_for_user():
security.get_security_attributes_for_user()
|
[
"jaraco.windows.security.get_security_attributes_for_user"
] |
[((87, 130), 'jaraco.windows.security.get_security_attributes_for_user', 'security.get_security_attributes_for_user', ([], {}), '()\n', (128, 130), False, 'from jaraco.windows import security\n')]
|
#!/usr/bin/env python3
'''
SPDX-License-Identifier: BSD-2-Clause
Copyright 2017 Massachusetts Institute of Technology.
'''
import asyncio
import json
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader as SafeLoader, SafeDumper as SafeDumper
from tornado import httpclient, platform
from keylime import common
async def request(method,url,params=None,data=None,context=None):
http_client = httpclient.AsyncHTTPClient()
if params is not None and len(list(params.keys()))>0:
url+='?'
for key in list(params.keys()):
url+="%s=%s&"%(key,params[key])
url=url[:-1]
if context is not None:
url = url.replace('http://','https://',1)
try:
request = httpclient.HTTPRequest(url=url,
method=method,
ssl_options=context,
body=data)
response = await http_client.fetch(request)
except httpclient.HTTPError as e:
if e.response is None:
return tornado_response(500,str(e))
return tornado_response(e.response.code,e.response.body)
except ConnectionError as e:
return tornado_response(599,"Connection error: %s"%e)
if response is None:
return None
return tornado_response(response.code,response.body)
def is_refused(e):
if hasattr(e,'strerror'):
return "Connection refused" in e.strerror
else:
return False
class tornado_response():
def __init__(self,code,body):
self.status_code = code
self.body = body
def json(self):
try:
retval = json.loads(self.body)
except Exception as e:
retval = [self.body,str(e)]
return retval
def yaml(self):
try:
retval = yaml.load(self.body, Loader=SafeLoader)
except Exception as e:
retval = [self.body,str(e)]
return retval
|
[
"yaml.load",
"json.loads",
"tornado.httpclient.AsyncHTTPClient",
"tornado.httpclient.HTTPRequest"
] |
[((490, 518), 'tornado.httpclient.AsyncHTTPClient', 'httpclient.AsyncHTTPClient', ([], {}), '()\n', (516, 518), False, 'from tornado import httpclient, platform\n'), ((806, 884), 'tornado.httpclient.HTTPRequest', 'httpclient.HTTPRequest', ([], {'url': 'url', 'method': 'method', 'ssl_options': 'context', 'body': 'data'}), '(url=url, method=method, ssl_options=context, body=data)\n', (828, 884), False, 'from tornado import httpclient, platform\n'), ((1747, 1768), 'json.loads', 'json.loads', (['self.body'], {}), '(self.body)\n', (1757, 1768), False, 'import json\n'), ((1919, 1958), 'yaml.load', 'yaml.load', (['self.body'], {'Loader': 'SafeLoader'}), '(self.body, Loader=SafeLoader)\n', (1928, 1958), False, 'import yaml\n')]
|
from unittest import TestCase
from authlib.oauth2.rfc7591 import ClientMetadataClaims
from authlib.jose.errors import InvalidClaimError
class ClientMetadataClaimsTest(TestCase):
def test_validate_redirect_uris(self):
claims = ClientMetadataClaims({'redirect_uris': ['foo']}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_client_uri(self):
claims = ClientMetadataClaims({'client_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_logo_uri(self):
claims = ClientMetadataClaims({'logo_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_tos_uri(self):
claims = ClientMetadataClaims({'tos_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_policy_uri(self):
claims = ClientMetadataClaims({'policy_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_jwks_uri(self):
claims = ClientMetadataClaims({'jwks_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
|
[
"authlib.oauth2.rfc7591.ClientMetadataClaims"
] |
[((240, 292), 'authlib.oauth2.rfc7591.ClientMetadataClaims', 'ClientMetadataClaims', (["{'redirect_uris': ['foo']}", '{}'], {}), "({'redirect_uris': ['foo']}, {})\n", (260, 292), False, 'from authlib.oauth2.rfc7591 import ClientMetadataClaims\n'), ((413, 460), 'authlib.oauth2.rfc7591.ClientMetadataClaims', 'ClientMetadataClaims', (["{'client_uri': 'foo'}", '{}'], {}), "({'client_uri': 'foo'}, {})\n", (433, 460), False, 'from authlib.oauth2.rfc7591 import ClientMetadataClaims\n'), ((579, 624), 'authlib.oauth2.rfc7591.ClientMetadataClaims', 'ClientMetadataClaims', (["{'logo_uri': 'foo'}", '{}'], {}), "({'logo_uri': 'foo'}, {})\n", (599, 624), False, 'from authlib.oauth2.rfc7591 import ClientMetadataClaims\n'), ((742, 786), 'authlib.oauth2.rfc7591.ClientMetadataClaims', 'ClientMetadataClaims', (["{'tos_uri': 'foo'}", '{}'], {}), "({'tos_uri': 'foo'}, {})\n", (762, 786), False, 'from authlib.oauth2.rfc7591 import ClientMetadataClaims\n'), ((907, 954), 'authlib.oauth2.rfc7591.ClientMetadataClaims', 'ClientMetadataClaims', (["{'policy_uri': 'foo'}", '{}'], {}), "({'policy_uri': 'foo'}, {})\n", (927, 954), False, 'from authlib.oauth2.rfc7591 import ClientMetadataClaims\n'), ((1073, 1118), 'authlib.oauth2.rfc7591.ClientMetadataClaims', 'ClientMetadataClaims', (["{'jwks_uri': 'foo'}", '{}'], {}), "({'jwks_uri': 'foo'}, {})\n", (1093, 1118), False, 'from authlib.oauth2.rfc7591 import ClientMetadataClaims\n')]
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ckeditor.fields import RichTextField
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
class Document(models.Model):
""" This model describes a document """
title = models.CharField(max_length=50)
description = models.CharField(max_length=100, blank=True)
body = RichTextField(blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
created = models.DateTimeField(editable=False)
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
return super(Document, self).save(*args, **kwargs)
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"ckeditor.fields.RichTextField",
"django.db.models.DateTimeField"
] |
[((822, 853), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (838, 853), False, 'from django.db import models\n'), ((872, 916), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (888, 916), False, 'from django.db import models\n'), ((928, 953), 'ckeditor.fields.RichTextField', 'RichTextField', ([], {'blank': '(True)'}), '(blank=True)\n', (941, 953), False, 'from ckeditor.fields import RichTextField\n'), ((965, 1014), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (982, 1014), False, 'from django.db import models\n'), ((1029, 1065), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'editable': '(False)'}), '(editable=False)\n', (1049, 1065), False, 'from django.db import models\n'), ((1155, 1169), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1167, 1169), False, 'from django.utils import timezone\n')]
|
import os
from pathlib import Path
from unittest import mock
from kabuka import kabuka, get_latest_price
TEST_DATA_DIR = Path(os.path.dirname(os.path.realpath(__file__))) / "test_data"
def test_is_numeric():
assert not kabuka.is_numeric("abc")
assert not kabuka.is_numeric("123a")
assert not kabuka.is_numeric("")
assert not kabuka.is_numeric("this is not a number")
assert not kabuka.is_numeric("a123")
assert not kabuka.is_numeric("a123a")
assert not kabuka.is_numeric("1a123")
assert not kabuka.is_numeric("0.0.0.0")
assert not kabuka.is_numeric("1,234.678")
assert kabuka.is_numeric("123")
assert kabuka.is_numeric(".123")
assert kabuka.is_numeric("123.")
assert kabuka.is_numeric("123.456")
assert kabuka.is_numeric("123_456")
assert kabuka.is_numeric("0.123")
assert kabuka.is_numeric(".123_456")
assert kabuka.is_numeric("123_456.")
assert kabuka.is_numeric("123_456.789_101")
def mocked_requests_get(url):
class MockedResponse:
def __init__(self, text):
self.text = text
uri = url.replace("https://finance.yahoo.com/quote", str(TEST_DATA_DIR)) + ".html"
try:
with open(uri, "rb") as f:
return MockedResponse(f.read().decode("utf8"))
except IOError:
with open(TEST_DATA_DIR / "unknown_symbol.html") as f:
return MockedResponse(f.read())
@mock.patch("requests.get", side_effect=mocked_requests_get)
def test_get_lastest_price(mock_get_latest_price):
# stonk
price = get_latest_price("TSLA")
assert kabuka.is_numeric(price) and float(price) >= 0
# ETF
price = get_latest_price("SPY")
assert kabuka.is_numeric(price) and float(price) >= 0
# tokyo stock exchange
price = get_latest_price("4385.T")
assert kabuka.is_numeric(price) and float(price) >= 0
# invalid symbol
try:
price = get_latest_price("")
except ValueError:
assert True
else:
assert False
# invalid symbol
try:
price = get_latest_price("apple.com")
except ValueError:
assert True
else:
assert False
|
[
"unittest.mock.patch",
"kabuka.get_latest_price",
"os.path.realpath",
"kabuka.kabuka.is_numeric"
] |
[((1401, 1460), 'unittest.mock.patch', 'mock.patch', (['"""requests.get"""'], {'side_effect': 'mocked_requests_get'}), "('requests.get', side_effect=mocked_requests_get)\n", (1411, 1460), False, 'from unittest import mock\n'), ((612, 636), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123"""'], {}), "('123')\n", (629, 636), False, 'from kabuka import kabuka, get_latest_price\n'), ((648, 673), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['""".123"""'], {}), "('.123')\n", (665, 673), False, 'from kabuka import kabuka, get_latest_price\n'), ((685, 710), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123."""'], {}), "('123.')\n", (702, 710), False, 'from kabuka import kabuka, get_latest_price\n'), ((722, 750), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123.456"""'], {}), "('123.456')\n", (739, 750), False, 'from kabuka import kabuka, get_latest_price\n'), ((762, 790), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123_456"""'], {}), "('123_456')\n", (779, 790), False, 'from kabuka import kabuka, get_latest_price\n'), ((802, 828), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""0.123"""'], {}), "('0.123')\n", (819, 828), False, 'from kabuka import kabuka, get_latest_price\n'), ((840, 869), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['""".123_456"""'], {}), "('.123_456')\n", (857, 869), False, 'from kabuka import kabuka, get_latest_price\n'), ((881, 910), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123_456."""'], {}), "('123_456.')\n", (898, 910), False, 'from kabuka import kabuka, get_latest_price\n'), ((922, 958), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123_456.789_101"""'], {}), "('123_456.789_101')\n", (939, 958), False, 'from kabuka import kabuka, get_latest_price\n'), ((1536, 1560), 'kabuka.get_latest_price', 'get_latest_price', (['"""TSLA"""'], {}), "('TSLA')\n", (1552, 1560), False, 'from kabuka import kabuka, get_latest_price\n'), ((1642, 1665), 'kabuka.get_latest_price', 'get_latest_price', (['"""SPY"""'], {}), "('SPY')\n", (1658, 1665), False, 'from kabuka import kabuka, get_latest_price\n'), ((1764, 1790), 'kabuka.get_latest_price', 'get_latest_price', (['"""4385.T"""'], {}), "('4385.T')\n", (1780, 1790), False, 'from kabuka import kabuka, get_latest_price\n'), ((226, 250), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""abc"""'], {}), "('abc')\n", (243, 250), False, 'from kabuka import kabuka, get_latest_price\n'), ((266, 291), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""123a"""'], {}), "('123a')\n", (283, 291), False, 'from kabuka import kabuka, get_latest_price\n'), ((307, 328), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['""""""'], {}), "('')\n", (324, 328), False, 'from kabuka import kabuka, get_latest_price\n'), ((344, 385), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""this is not a number"""'], {}), "('this is not a number')\n", (361, 385), False, 'from kabuka import kabuka, get_latest_price\n'), ((401, 426), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""a123"""'], {}), "('a123')\n", (418, 426), False, 'from kabuka import kabuka, get_latest_price\n'), ((442, 468), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""a123a"""'], {}), "('a123a')\n", (459, 468), False, 'from kabuka import kabuka, get_latest_price\n'), ((484, 510), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""1a123"""'], {}), "('1a123')\n", (501, 510), False, 'from kabuka import kabuka, get_latest_price\n'), ((526, 554), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (543, 554), False, 'from kabuka import kabuka, get_latest_price\n'), ((570, 600), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['"""1,234.678"""'], {}), "('1,234.678')\n", (587, 600), False, 'from kabuka import kabuka, get_latest_price\n'), ((1572, 1596), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['price'], {}), '(price)\n', (1589, 1596), False, 'from kabuka import kabuka, get_latest_price\n'), ((1677, 1701), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['price'], {}), '(price)\n', (1694, 1701), False, 'from kabuka import kabuka, get_latest_price\n'), ((1802, 1826), 'kabuka.kabuka.is_numeric', 'kabuka.is_numeric', (['price'], {}), '(price)\n', (1819, 1826), False, 'from kabuka import kabuka, get_latest_price\n'), ((1896, 1916), 'kabuka.get_latest_price', 'get_latest_price', (['""""""'], {}), "('')\n", (1912, 1916), False, 'from kabuka import kabuka, get_latest_price\n'), ((2038, 2067), 'kabuka.get_latest_price', 'get_latest_price', (['"""apple.com"""'], {}), "('apple.com')\n", (2054, 2067), False, 'from kabuka import kabuka, get_latest_price\n'), ((144, 170), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (160, 170), False, 'import os\n')]
|
"""empty message
Revision ID: f24691273ca4
Revises: <PASSWORD>
Create Date: 2019-06-18 13:45:46.250079
"""
# revision identifiers, used by Alembic.
revision = 'f24691273ca4'
down_revision = 'b<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('user_id', sa.String(length=300), nullable=False),
sa.Column('first_name', sa.String(length=300), nullable=True),
sa.Column('last_name', sa.String(length=300), nullable=True),
sa.Column('email', sa.String(length=300), nullable=True),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('gender', sa.String(length=300), nullable=True),
sa.PrimaryKeyConstraint('user_id')
)
op.drop_table('user')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('first_name', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.Column('last_name', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.Column('age', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('email', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.Column('gender', sa.VARCHAR(length=300), autoincrement=False, nullable=True),
sa.Column('user_id', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('first_name', 'last_name', name='user_pkey')
)
op.drop_table('users')
# ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.VARCHAR",
"sqlalchemy.INTEGER",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((790, 811), 'alembic.op.drop_table', 'op.drop_table', (['"""user"""'], {}), "('user')\n", (803, 811), False, 'from alembic import op\n'), ((1551, 1573), 'alembic.op.drop_table', 'op.drop_table', (['"""users"""'], {}), "('users')\n", (1564, 1573), False, 'from alembic import op\n'), ((745, 779), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""user_id"""'], {}), "('user_id')\n", (768, 779), True, 'import sqlalchemy as sa\n'), ((1472, 1540), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""first_name"""', '"""last_name"""'], {'name': '"""user_pkey"""'}), "('first_name', 'last_name', name='user_pkey')\n", (1495, 1540), True, 'import sqlalchemy as sa\n'), ((392, 413), 'sqlalchemy.String', 'sa.String', ([], {'length': '(300)'}), '(length=300)\n', (401, 413), True, 'import sqlalchemy as sa\n'), ((460, 481), 'sqlalchemy.String', 'sa.String', ([], {'length': '(300)'}), '(length=300)\n', (469, 481), True, 'import sqlalchemy as sa\n'), ((526, 547), 'sqlalchemy.String', 'sa.String', ([], {'length': '(300)'}), '(length=300)\n', (535, 547), True, 'import sqlalchemy as sa\n'), ((588, 609), 'sqlalchemy.String', 'sa.String', ([], {'length': '(300)'}), '(length=300)\n', (597, 609), True, 'import sqlalchemy as sa\n'), ((648, 660), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (658, 660), True, 'import sqlalchemy as sa\n'), ((702, 723), 'sqlalchemy.String', 'sa.String', ([], {'length': '(300)'}), '(length=300)\n', (711, 723), True, 'import sqlalchemy as sa\n'), ((988, 1010), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(300)'}), '(length=300)\n', (998, 1010), True, 'import sqlalchemy as sa\n'), ((1077, 1099), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(300)'}), '(length=300)\n', (1087, 1099), True, 'import sqlalchemy as sa\n'), ((1160, 1172), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (1170, 1172), True, 'import sqlalchemy as sa\n'), ((1234, 1256), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(300)'}), '(length=300)\n', (1244, 1256), True, 'import sqlalchemy as sa\n'), ((1320, 1342), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(300)'}), '(length=300)\n', (1330, 1342), True, 'import sqlalchemy as sa\n'), ((1406, 1428), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(300)'}), '(length=300)\n', (1416, 1428), True, 'import sqlalchemy as sa\n')]
|
import numpy as np
from mpi4py import MPI
from SIMP import TO_SIMP, make_Conn_matrix
def get_void(nely,nelx):
v=np.zeros((nely,nelx))
R=min(nely,nelx)/15
loc=np.array([[1/3, 1/4], [2/3, 1/4],[ 1/3, 1/2], [2/3, 1/2], [1/3 , 3/4], [2/3, 3/4]])
loc=loc*np.array([[nely,nelx]])
for i in range(nely):
for j in range(nelx):
v[i,j]=R-np.min(np.sqrt(np.sum((loc-np.array([[i+1,j+1]]))**2,1)));
v=v>0
return v
def evaluate(x0,volfrac,void,Iar,cMat):
beta=0.05
epsilon_2=0.25
nelx=90
nely=45
penal=3
E0=1
nu=0.3
max_move=0.25
if np.mean(x0)>volfrac:
x0=x0*volfrac/np.mean(x0)
_,c1 = TO_SIMP(x0,nelx,nely,volfrac,penal,beta,epsilon_2,max_move,E0,nu,Iar,cMat,True,void,np.zeros((1,nely,nelx)),0,10)
_,c2 = TO_SIMP(x0,nelx,nely,volfrac,penal,beta,epsilon_2,max_move,E0,nu,Iar,cMat,True,void,np.zeros((1,nely,nelx)),0,0)
return c1,c2
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
nelx=90
nely=45
volfrac=0.4+0.6*0.0001
void=get_void(nely,nelx)
Iar,cMat=make_Conn_matrix(nelx,nely)
num_samples=100000
perrank=int(np.ceil(num_samples/size))
num_samples=perrank*size
C_rand_rank=np.zeros(perrank)
C_rand_opt_rank=np.zeros(perrank)
C_rand_inner_opt_rank=np.zeros(perrank)
X_rand_rank=np.zeros((perrank,nely,nelx))
for i in range(perrank):
X_rand_rank[i]=np.random.rand(nely,nelx)**1.5
X_rand_rank_inner_i=X_rand_rank[i]*0.5+0.2
C_rand_opt_rank[i],C_rand_rank[i]=evaluate(X_rand_rank[i],volfrac,void,Iar,cMat)
C_rand_inner_opt_rank[i],_=evaluate(X_rand_rank_inner_i,volfrac,void,Iar,cMat)
if rank==0:
X_rand=np.zeros((perrank*size,nely,nelx))
C_rand=np.zeros(perrank*size)
C_rand_opt=np.zeros(perrank*size)
C_rand_inner_opt=np.zeros(perrank*size)
else:
X_rand=None
C_rand=None
C_rand_opt=None
C_rand_inner_opt=None
comm.Gather(C_rand_rank,C_rand,root=0)
comm.Gather(C_rand_opt_rank,C_rand_opt,root=0)
comm.Gather(C_rand_inner_opt_rank,C_rand_inner_opt,root=0)
comm.Gather(X_rand_rank,X_rand,root=0)
if rank==0:
np.save('Sample_data/X_rand.npy',X_rand)
np.save('Sample_data/C_rand_opt.npy',C_rand_opt)
np.save('Sample_data/C_rand_inner_opt.npy',C_rand_inner_opt)
np.save('Sample_data/C_rand.npy',C_rand)
|
[
"numpy.save",
"numpy.ceil",
"SIMP.make_Conn_matrix",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.random.rand"
] |
[((1080, 1108), 'SIMP.make_Conn_matrix', 'make_Conn_matrix', (['nelx', 'nely'], {}), '(nelx, nely)\n', (1096, 1108), False, 'from SIMP import TO_SIMP, make_Conn_matrix\n'), ((1206, 1223), 'numpy.zeros', 'np.zeros', (['perrank'], {}), '(perrank)\n', (1214, 1223), True, 'import numpy as np\n'), ((1240, 1257), 'numpy.zeros', 'np.zeros', (['perrank'], {}), '(perrank)\n', (1248, 1257), True, 'import numpy as np\n'), ((1280, 1297), 'numpy.zeros', 'np.zeros', (['perrank'], {}), '(perrank)\n', (1288, 1297), True, 'import numpy as np\n'), ((1310, 1341), 'numpy.zeros', 'np.zeros', (['(perrank, nely, nelx)'], {}), '((perrank, nely, nelx))\n', (1318, 1341), True, 'import numpy as np\n'), ((117, 139), 'numpy.zeros', 'np.zeros', (['(nely, nelx)'], {}), '((nely, nelx))\n', (125, 139), True, 'import numpy as np\n'), ((171, 282), 'numpy.array', 'np.array', (['[[1 / 3, 1 / 4], [2 / 3, 1 / 4], [1 / 3, 1 / 2], [2 / 3, 1 / 2], [1 / 3, 3 /\n 4], [2 / 3, 3 / 4]]'], {}), '([[1 / 3, 1 / 4], [2 / 3, 1 / 4], [1 / 3, 1 / 2], [2 / 3, 1 / 2], [\n 1 / 3, 3 / 4], [2 / 3, 3 / 4]])\n', (179, 282), True, 'import numpy as np\n'), ((1141, 1168), 'numpy.ceil', 'np.ceil', (['(num_samples / size)'], {}), '(num_samples / size)\n', (1148, 1168), True, 'import numpy as np\n'), ((1668, 1706), 'numpy.zeros', 'np.zeros', (['(perrank * size, nely, nelx)'], {}), '((perrank * size, nely, nelx))\n', (1676, 1706), True, 'import numpy as np\n'), ((1714, 1738), 'numpy.zeros', 'np.zeros', (['(perrank * size)'], {}), '(perrank * size)\n', (1722, 1738), True, 'import numpy as np\n'), ((1752, 1776), 'numpy.zeros', 'np.zeros', (['(perrank * size)'], {}), '(perrank * size)\n', (1760, 1776), True, 'import numpy as np\n'), ((1796, 1820), 'numpy.zeros', 'np.zeros', (['(perrank * size)'], {}), '(perrank * size)\n', (1804, 1820), True, 'import numpy as np\n'), ((2109, 2150), 'numpy.save', 'np.save', (['"""Sample_data/X_rand.npy"""', 'X_rand'], {}), "('Sample_data/X_rand.npy', X_rand)\n", (2116, 2150), True, 'import numpy as np\n'), ((2154, 2203), 'numpy.save', 'np.save', (['"""Sample_data/C_rand_opt.npy"""', 'C_rand_opt'], {}), "('Sample_data/C_rand_opt.npy', C_rand_opt)\n", (2161, 2203), True, 'import numpy as np\n'), ((2207, 2268), 'numpy.save', 'np.save', (['"""Sample_data/C_rand_inner_opt.npy"""', 'C_rand_inner_opt'], {}), "('Sample_data/C_rand_inner_opt.npy', C_rand_inner_opt)\n", (2214, 2268), True, 'import numpy as np\n'), ((2272, 2313), 'numpy.save', 'np.save', (['"""Sample_data/C_rand.npy"""', 'C_rand'], {}), "('Sample_data/C_rand.npy', C_rand)\n", (2279, 2313), True, 'import numpy as np\n'), ((267, 291), 'numpy.array', 'np.array', (['[[nely, nelx]]'], {}), '([[nely, nelx]])\n', (275, 291), True, 'import numpy as np\n'), ((605, 616), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (612, 616), True, 'import numpy as np\n'), ((764, 789), 'numpy.zeros', 'np.zeros', (['(1, nely, nelx)'], {}), '((1, nely, nelx))\n', (772, 789), True, 'import numpy as np\n'), ((889, 914), 'numpy.zeros', 'np.zeros', (['(1, nely, nelx)'], {}), '((1, nely, nelx))\n', (897, 914), True, 'import numpy as np\n'), ((1384, 1410), 'numpy.random.rand', 'np.random.rand', (['nely', 'nelx'], {}), '(nely, nelx)\n', (1398, 1410), True, 'import numpy as np\n'), ((648, 659), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (655, 659), True, 'import numpy as np\n'), ((395, 421), 'numpy.array', 'np.array', (['[[i + 1, j + 1]]'], {}), '([[i + 1, j + 1]])\n', (403, 421), True, 'import numpy as np\n')]
|
import time
from datetime import date
from spydr.spydr import Spydr
from urllib.parse import urlparse
target_url = input("Enter the URL to crawl:\t")
start_time = time.time()
domain_file_name = urlparse(target_url).netloc.replace(".", "_")
result = Spydr().crawl(target_url)
end_time = time.time()
print(f"\n{len(result)} URLs Crawled in {end_time - start_time} seconds")
file_name = domain_file_name + "_" + str(date.today().isoformat()) + ".txt"
with open(file_name, 'w') as wf:
for entry in result:
wf.write(entry + "\n")
print("Result file created!")
|
[
"spydr.spydr.Spydr",
"datetime.date.today",
"urllib.parse.urlparse",
"time.time"
] |
[((166, 177), 'time.time', 'time.time', ([], {}), '()\n', (175, 177), False, 'import time\n'), ((292, 303), 'time.time', 'time.time', ([], {}), '()\n', (301, 303), False, 'import time\n'), ((254, 261), 'spydr.spydr.Spydr', 'Spydr', ([], {}), '()\n', (259, 261), False, 'from spydr.spydr import Spydr\n'), ((198, 218), 'urllib.parse.urlparse', 'urlparse', (['target_url'], {}), '(target_url)\n', (206, 218), False, 'from urllib.parse import urlparse\n'), ((421, 433), 'datetime.date.today', 'date.today', ([], {}), '()\n', (431, 433), False, 'from datetime import date\n')]
|
from typing import Tuple
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
class MNIST:
def __init__(self, with_normalization: bool = True) -> None:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
self.x_train_: np.ndarray = None
self.y_train_: np.ndarray = None
self.x_val_: np.ndarray = None
self.y_val_: np.ndarray = None
self.val_size = 0
self.train_splitted_size = 0
# Preprocess x data
self.x_train = x_train.astype(np.float32)
self.x_train = np.expand_dims(x_train, axis=-1)
if with_normalization:
self.x_train = self.x_train / 255.0
self.x_test = x_test.astype(np.float32)
self.x_test = np.expand_dims(x_test, axis=-1)
if with_normalization:
self.x_test = self.x_test / 255.0
# Dataset attributes
self.train_size = self.x_train.shape[0]
self.test_size = self.x_test.shape[0]
self.width = self.x_train.shape[1]
self.height = self.x_train.shape[2]
self.depth = self.x_train.shape[3]
self.img_shape = (self.width, self.height, self.depth)
self.num_classes = 10
# Preprocess y data
self.y_train = to_categorical(y_train, num_classes=self.num_classes)
self.y_test = to_categorical(y_test, num_classes=self.num_classes)
def get_train_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_train, self.y_train
def get_test_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_test, self.y_test
def get_splitted_train_validation_set(self, validation_size: float = 0.33) -> tuple:
self.x_train_, self.x_val_, self.y_train_, self.y_val_ = train_test_split(
self.x_train,
self.y_train,
test_size=validation_size
)
self.val_size = self.x_val_.shape[0]
self.train_splitted_size = self.x_train_.shape[0]
return self.x_train_, self.x_val_, self.y_train_, self.y_val_
def data_augmentation(self, augment_size: int = 5_000) -> None:
image_generator = ImageDataGenerator(
rotation_range=5,
zoom_range=0.08,
width_shift_range=0.08,
height_shift_range=0.08
)
# Fit the data generator
image_generator.fit(self.x_train, augment=True)
# Get random train images for the data augmentation
rand_idxs = np.random.randint(self.train_size, size=augment_size)
x_augmented = self.x_train[rand_idxs].copy()
y_augmented = self.y_train[rand_idxs].copy()
x_augmented = image_generator.flow(
x_augmented,
np.zeros(augment_size),
batch_size=augment_size,
shuffle=False
).next()[0]
# Append the augmented images to the train set
self.x_train = np.concatenate((self.x_train, x_augmented))
self.y_train = np.concatenate((self.y_train, y_augmented))
self.train_size = self.x_train.shape[0]
|
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.utils.to_categorical",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.random.randint",
"numpy.concatenate"
] |
[((398, 415), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (413, 415), False, 'from tensorflow.keras.datasets import mnist\n'), ((749, 781), 'numpy.expand_dims', 'np.expand_dims', (['x_train'], {'axis': '(-1)'}), '(x_train, axis=-1)\n', (763, 781), True, 'import numpy as np\n'), ((935, 966), 'numpy.expand_dims', 'np.expand_dims', (['x_test'], {'axis': '(-1)'}), '(x_test, axis=-1)\n', (949, 966), True, 'import numpy as np\n'), ((1453, 1506), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {'num_classes': 'self.num_classes'}), '(y_train, num_classes=self.num_classes)\n', (1467, 1506), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1530, 1582), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test'], {'num_classes': 'self.num_classes'}), '(y_test, num_classes=self.num_classes)\n', (1544, 1582), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1954, 2025), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.x_train', 'self.y_train'], {'test_size': 'validation_size'}), '(self.x_train, self.y_train, test_size=validation_size)\n', (1970, 2025), False, 'from sklearn.model_selection import train_test_split\n'), ((2350, 2457), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(5)', 'zoom_range': '(0.08)', 'width_shift_range': '(0.08)', 'height_shift_range': '(0.08)'}), '(rotation_range=5, zoom_range=0.08, width_shift_range=\n 0.08, height_shift_range=0.08)\n', (2368, 2457), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2689, 2742), 'numpy.random.randint', 'np.random.randint', (['self.train_size'], {'size': 'augment_size'}), '(self.train_size, size=augment_size)\n', (2706, 2742), True, 'import numpy as np\n'), ((3125, 3168), 'numpy.concatenate', 'np.concatenate', (['(self.x_train, x_augmented)'], {}), '((self.x_train, x_augmented))\n', (3139, 3168), True, 'import numpy as np\n'), ((3193, 3236), 'numpy.concatenate', 'np.concatenate', (['(self.y_train, y_augmented)'], {}), '((self.y_train, y_augmented))\n', (3207, 3236), True, 'import numpy as np\n'), ((2935, 2957), 'numpy.zeros', 'np.zeros', (['augment_size'], {}), '(augment_size)\n', (2943, 2957), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from logzero import logger
from apis import storage
if __name__ == "__main__":
import cli
from config import cfg
cfg.load()
parser = cli.build_parser()
args = cli.parse_args(parser)
parser.add_argument(
"-g",
"--gcs-bucket-prefix",
default=cfg.gcs_bucket_prefix,
help="The GCS bucket prefix to publish the static site under.",
)
parser.add_argument(
"-s",
"--site-hostname",
default=cfg.hostname,
help="Fully-qualified domain name of the published site. Used in cache purging / priming methods.",
)
args = cli.parse_args(parser)
storage.remove_subpath_from_gcs(
client=storage.get_client(),
bucket_id=args.site_hostname,
prefix=args.gcs_bucket_prefix,
)
logger.info(
f"Subpath deletion {args.gcs_bucket_prefix} for {args.site_hostname} completed! 🎉"
)
|
[
"cli.build_parser",
"logzero.logger.info",
"apis.storage.get_client",
"cli.parse_args",
"config.cfg.load"
] |
[((150, 160), 'config.cfg.load', 'cfg.load', ([], {}), '()\n', (158, 160), False, 'from config import cfg\n'), ((174, 192), 'cli.build_parser', 'cli.build_parser', ([], {}), '()\n', (190, 192), False, 'import cli\n'), ((204, 226), 'cli.parse_args', 'cli.parse_args', (['parser'], {}), '(parser)\n', (218, 226), False, 'import cli\n'), ((635, 657), 'cli.parse_args', 'cli.parse_args', (['parser'], {}), '(parser)\n', (649, 657), False, 'import cli\n'), ((820, 925), 'logzero.logger.info', 'logger.info', (['f"""Subpath deletion {args.gcs_bucket_prefix} for {args.site_hostname} completed! 🎉"""'], {}), "(\n f'Subpath deletion {args.gcs_bucket_prefix} for {args.site_hostname} completed! 🎉'\n )\n", (831, 925), False, 'from logzero import logger\n'), ((711, 731), 'apis.storage.get_client', 'storage.get_client', ([], {}), '()\n', (729, 731), False, 'from apis import storage\n')]
|
import os
import math
import json
import uuid
import unittest
import importlib
import subprocess
from unittest import mock
from .. import rules
from .. import config
from .. import helpers
from .. import importer
from .. import constants
from .. import exceptions
from .. import scopes_tree
TEST_FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
class TestApplyRules(unittest.TestCase):
def setUp(self):
self.source_module = 'smart_imports.tests.fake_package.config_variables'
self.config = config.DEFAULT_CONFIG.clone(path='#config.1',
rules=[{'type': 'rule_custom',
'variables': {'config_variable': {'module': self.source_module}}},
{'type': 'rule_local_modules'},
{'type': 'rule_stdlib'},
{'type': 'rule_predefined_names'}])
self.module = type(os)
def test_command_not_found(self):
result = importer.apply_rules(module_config=self.config,
module=self.module,
variable='x')
self.assertEqual(result, None)
def test_command_found(self):
command = importer.apply_rules(module_config=self.config,
module=self.module,
variable='config_variable')
self.assertEqual(command, rules.ImportCommand(target_module=self.module,
target_attribute='config_variable',
source_module=self.source_module,
source_attribute=None))
def test_rules_priority(self):
test_config = config.DEFAULT_CONFIG.clone(path='#config.2',
rules=[{'type': 'rule_custom',
'variables': {'var_1': {'module': 'math'}}},
{'type': 'rule_custom',
'variables': {'var_1': {'module': 'json'}}}])
command = importer.apply_rules(module_config=test_config,
module=self.module,
variable='var_1')
self.assertEqual(command, rules.ImportCommand(target_module=self.module,
target_attribute='var_1',
source_module='math',
source_attribute=None))
class TestGetModuleScopesTree(unittest.TestCase):
def test(self):
source = '''
x = 1
def y(q):
return q + z
'''
scope = importer.get_module_scopes_tree(source)
self.assertEqual(scope.variables, {'x': scopes_tree.VariableInfo(constants.VARIABLE_STATE.INITIALIZED, 2),
'y': scopes_tree.VariableInfo(constants.VARIABLE_STATE.INITIALIZED, 4)})
self.assertEqual(scope.children[0].variables,
{'q': scopes_tree.VariableInfo(constants.VARIABLE_STATE.INITIALIZED, 4),
'z': scopes_tree.VariableInfo(constants.VARIABLE_STATE.UNINITIALIZED, 5)})
class TestExtractVariables(unittest.TestCase):
def test_empty_source(self):
self.assertEqual(importer.extract_variables(''), ([], {}))
def test_has_source(self):
source = '''
x = 1 + y
def y():
return x + z
'''
self.assertEqual(set(importer.extract_variables(source)[0]),
{'z', 'y'})
class TestProcessModule(unittest.TestCase):
SIMPLE_SOURCE = '''
x = 'X'
def y(z):
return z + math.log(1)
'''
def apply_commands(self, commands):
for command in commands:
command()
def test_process_simple(self):
module_name = 'process_simple_' + uuid.uuid4().hex
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(self.SIMPLE_SOURCE)
module = importlib.import_module(module_name)
self.assertEqual(getattr(module, 'math', None), None)
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=module)
self.assertEqual(commands,
[rules.ImportCommand(target_module=module,
target_attribute='math',
source_module='math',
source_attribute=None)])
self.apply_commands(commands)
self.assertEqual(getattr(module, 'math'), math)
def test_process_simple__cached(self):
module_name = 'process_simple_' + uuid.uuid4().hex
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(self.SIMPLE_SOURCE)
module = importlib.import_module(module_name)
self.assertEqual(getattr(module, 'math', None), None)
# not required to create other temp directory, since filenames do not intersect
test_config = config.DEFAULT_CONFIG.clone(cache_dir=temp_directory)
commands = importer.process_module(module_config=test_config,
module=module)
self.apply_commands(commands)
self.assertEqual(getattr(module, 'math'), math)
self.assertTrue(os.path.isfile(os.path.join(temp_directory, module_name + '.cache')))
with mock.patch('smart_imports.importer.extract_variables') as extract_variables:
importer.process_module(module_config=test_config,
module=module)
extract_variables.assert_not_called()
def prepair_data(self, temp_directory):
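        # generate four interdependent modules so the circular-import tests can exercise the import hook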
modules_names = []
for i in range(1, 5):
modules_names.append('process_module_circular_{}_{}'.format(i, uuid.uuid4().hex))
source_1 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
x = 1
def y():
return {module_2_name}.z()
'''.format(module_2_name=modules_names[1])
source_2 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
def z():
return {module_1_name}.x
'''.format(module_1_name=modules_names[0])
source_3 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
x = 1
y = 10 + {module_4_name}.z
'''.format(module_4_name=modules_names[3])
source_4 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
z = 100 + {module_1_name}.x
'''.format(module_1_name=modules_names[0])
sources = [source_1, source_2, source_3, source_4]
for name, source in zip(modules_names, sources):
with open(os.path.join(temp_directory, name + '.py'), 'w') as f:
f.write(source)
return modules_names
def test_process_circular__local_namespace(self):
with helpers.test_directory() as temp_directory:
modules_names = self.prepair_data(temp_directory)
module = importlib.import_module(modules_names[0])
self.assertTrue(hasattr(module, modules_names[1]))
self.assertEqual(module.y(), 1)
def test_process_circular__global_namespace(self):
with helpers.test_directory() as temp_directory:
modules_names = self.prepair_data(temp_directory)
module = importlib.import_module(modules_names[2])
self.assertTrue(hasattr(module, modules_names[3]))
self.assertEqual(module.y, 111)
def test_no_import_found(self):
module_name = 'process_module_no_imports_{}'.format(uuid.uuid4().hex)
source = '''
def y():
print(x)
def z():
print(x)
'''
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(source)
module = importlib.import_module(module_name)
with self.assertRaises(exceptions.NoImportFound) as error:
importer.process_module(module_config=config.DEFAULT_CONFIG,
module=module)
self.assertEqual(set(error.exception.arguments['lines']), {3, 6})
def test_no_import_found__cached_module(self):
module_name = 'process_module_no_imports_{}'.format(uuid.uuid4().hex)
source = '''
def y():
print(x)
def z():
print(x)
'''
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(source)
module = importlib.import_module(module_name)
            # no need to create a separate temp directory, since the file names do not clash
test_config = config.DEFAULT_CONFIG.clone(cache_dir=temp_directory)
# test repeated calls
for i in range(3):
with self.assertRaises(exceptions.NoImportFound) as error:
importer.process_module(module_config=test_config,
module=module)
self.assertEqual(set(error.exception.arguments['lines']), {3, 6})
self.assertTrue(os.path.isfile(os.path.join(temp_directory, module_name + '.cache')))
class TestAll(unittest.TestCase):
def test(self):
self.assertNotIn('string', globals())
importer.all(importlib.import_module('smart_imports.tests.test_importer'))
self.assertIn('string', globals())
self.assertEqual(string.digits, '0123456789')
class TestSimpleScript(unittest.TestCase):
def prepair_modules(self, base_directory):
os.makedirs(os.path.join(base_directory, 'a', 'b', 'c'))
script = '''
import smart_imports
smart_imports.all()
myprint((__name__, datetime.datetime.now()))
'''
with open(os.path.join(base_directory, 'a.py'), 'w') as f:
f.write(script)
config = {'rules': [{'type': 'rule_predefined_names'},
{'type': 'rule_stdlib'},
{'type': 'rule_custom',
'variables': {'myprint': {'module': 'pprint', 'attribute': 'pprint'}}}]}
with open(os.path.join(base_directory, 'smart_imports.json'), 'w') as f:
f.write(json.dumps(config))
def test(self):
with helpers.test_directory() as temp_directory:
self.prepair_modules(temp_directory)
output = subprocess.check_output(['python', os.path.join(temp_directory, 'a.py')])
self.assertIn(b"'__main__'", output)
self.assertIn(b"datetime.datetime", output)
|
[
"uuid.uuid4",
"importlib.import_module",
"os.path.dirname",
"json.dumps",
"unittest.mock.patch",
"os.path.join"
] |
[((329, 354), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (344, 354), False, 'import os\n'), ((4424, 4460), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (4447, 4460), False, 'import importlib\n'), ((5424, 5460), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (5447, 5460), False, 'import importlib\n'), ((8960, 9001), 'importlib.import_module', 'importlib.import_module', (['modules_names[0]'], {}), '(modules_names[0])\n', (8983, 9001), False, 'import importlib\n'), ((9308, 9349), 'importlib.import_module', 'importlib.import_module', (['modules_names[2]'], {}), '(modules_names[2])\n', (9331, 9349), False, 'import importlib\n'), ((9840, 9876), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (9863, 9876), False, 'import importlib\n'), ((10563, 10599), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (10586, 10599), False, 'import importlib\n'), ((11355, 11415), 'importlib.import_module', 'importlib.import_module', (['"""smart_imports.tests.test_importer"""'], {}), "('smart_imports.tests.test_importer')\n", (11378, 11415), False, 'import importlib\n'), ((11629, 11672), 'os.path.join', 'os.path.join', (['base_directory', '"""a"""', '"""b"""', '"""c"""'], {}), "(base_directory, 'a', 'b', 'c')\n", (11641, 11672), False, 'import os\n'), ((4199, 4211), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4209, 4211), False, 'import uuid\n'), ((5199, 5211), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5209, 5211), False, 'import uuid\n'), ((6059, 6113), 'unittest.mock.patch', 'mock.patch', (['"""smart_imports.importer.extract_variables"""'], {}), "('smart_imports.importer.extract_variables')\n", (6069, 6113), False, 'from unittest import mock\n'), ((9556, 9568), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9566, 9568), False, 'import uuid\n'), ((10279, 10291), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10289, 10291), False, 'import uuid\n'), ((11815, 11851), 'os.path.join', 'os.path.join', (['base_directory', '"""a.py"""'], {}), "(base_directory, 'a.py')\n", (11827, 11851), False, 'import os\n'), ((12182, 12232), 'os.path.join', 'os.path.join', (['base_directory', '"""smart_imports.json"""'], {}), "(base_directory, 'smart_imports.json')\n", (12194, 12232), False, 'import os\n'), ((12265, 12283), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (12275, 12283), False, 'import json\n'), ((4296, 4345), 'os.path.join', 'os.path.join', (['temp_directory', "(module_name + '.py')"], {}), "(temp_directory, module_name + '.py')\n", (4308, 4345), False, 'import os\n'), ((5296, 5345), 'os.path.join', 'os.path.join', (['temp_directory', "(module_name + '.py')"], {}), "(temp_directory, module_name + '.py')\n", (5308, 5345), False, 'import os\n'), ((5986, 6038), 'os.path.join', 'os.path.join', (['temp_directory', "(module_name + '.cache')"], {}), "(temp_directory, module_name + '.cache')\n", (5998, 6038), False, 'import os\n'), ((8645, 8687), 'os.path.join', 'os.path.join', (['temp_directory', "(name + '.py')"], {}), "(temp_directory, name + '.py')\n", (8657, 8687), False, 'import os\n'), ((9724, 9773), 'os.path.join', 'os.path.join', (['temp_directory', "(module_name + '.py')"], {}), "(temp_directory, module_name + '.py')\n", (9736, 9773), False, 'import os\n'), ((10447, 10496), 'os.path.join', 'os.path.join', (['temp_directory', "(module_name + '.py')"], {}), "(temp_directory, 
module_name + '.py')\n", (10459, 10496), False, 'import os\n'), ((12469, 12505), 'os.path.join', 'os.path.join', (['temp_directory', '"""a.py"""'], {}), "(temp_directory, 'a.py')\n", (12481, 12505), False, 'import os\n'), ((6487, 6499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6497, 6499), False, 'import uuid\n'), ((11175, 11227), 'os.path.join', 'os.path.join', (['temp_directory', "(module_name + '.cache')"], {}), "(temp_directory, module_name + '.cache')\n", (11187, 11227), False, 'import os\n')]
|
import os
import time
import re
import unittest
import batch
from flask import Flask, Response, request
import requests
from .serverthread import ServerThread
class Test(unittest.TestCase):
def setUp(self):
self.batch = batch.client.BatchClient(url=os.environ.get('BATCH_URL'))
def test_job(self):
j = self.batch.create_job('alpine', ['echo', 'test'])
status = j.wait()
self.assertTrue('attributes' not in status)
self.assertEqual(status['state'], 'Complete')
self.assertEqual(status['exit_code'], 0)
self.assertEqual(status['log'], 'test\n')
self.assertEqual(j.log(), 'test\n')
self.assertTrue(j.is_complete())
def test_create_fails_for_closed_batch(self):
b = self.batch.create_batch()
b.close()
try:
b.create_job('alpine', ['echo', 'test'])
except requests.exceptions.HTTPError as err:
assert err.response.status_code == 400
assert re.search('.*invalid request: batch_id [0-9]+ is closed', err.response.text)
return
assert False
def test_batch_ttl(self):
b = self.batch.create_batch(ttl=1)
t = 1
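        # wait with exponential backoff (1, 2, 4, ... seconds) until the batch closes or the timeout is reached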
while b.status()['is_open']:
if t > 64:
assert False, "took more than 128 seconds to close a batch with ttl 1"
time.sleep(t)
t = t * 2
def test_attributes(self):
a = {
'name': 'test_attributes',
'foo': 'bar'
}
j = self.batch.create_job('alpine', ['true'], attributes=a)
status = j.status()
assert(status['attributes'] == a)
def test_scratch_folder(self):
sb = 'gs://test-bucket/folder'
j = self.batch.create_job('alpine', ['true'], scratch_folder=sb)
status = j.status()
assert(status['scratch_folder'] == sb)
def test_fail(self):
j = self.batch.create_job('alpine', ['false'])
status = j.wait()
self.assertEqual(status['exit_code'], 1)
def test_deleted_job_log(self):
j = self.batch.create_job('alpine', ['echo', 'test'])
id = j.id
j.wait()
j.delete()
self.assertEqual(self.batch._get_job_log(id), 'test\n')
def test_delete_job(self):
j = self.batch.create_job('alpine', ['sleep', '30'])
id = j.id
j.delete()
# verify doesn't exist
try:
self.batch._get_job(id)
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_cancel_job(self):
j = self.batch.create_job('alpine', ['sleep', '30'])
status = j.status()
self.assertTrue(status['state'], 'Created')
j.cancel()
status = j.status()
self.assertTrue(status['state'], 'Cancelled')
self.assertTrue('log' not in status)
# cancelled job has no log
try:
j.log()
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_get_nonexistent_job(self):
try:
self.batch._get_job(666)
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_api_cancel_nonexistent_job(self):
try:
self.batch._cancel_job(666)
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_get_job(self):
j = self.batch.create_job('alpine', ['true'])
j2 = self.batch.get_job(j.id)
status2 = j2.status()
assert(status2['id'] == j.id)
def test_batch(self):
b = self.batch.create_batch()
j1 = b.create_job('alpine', ['false'])
j2 = b.create_job('alpine', ['sleep', '1'])
j3 = b.create_job('alpine', ['sleep', '30'])
# test list_jobs
jobs = self.batch.list_jobs()
self.assertTrue(
set([j.id for j in jobs]).issuperset([j1.id, j2.id, j3.id]))
# test refresh_k8s_state
self.batch._refresh_k8s_state()
j2.wait()
j3.cancel()
bstatus = b.wait()
n_cancelled = bstatus['jobs']['Cancelled']
n_complete = bstatus['jobs']['Complete']
self.assertTrue(n_cancelled <= 1)
self.assertTrue(n_cancelled + n_complete == 3)
n_failed = sum([ec > 0 for _, ec in bstatus['exit_codes'].items() if ec is not None])
self.assertTrue(n_failed == 1)
def test_callback(self):
app = Flask('test-client')
d = {}
@app.route('/test', methods=['POST'])
def test():
d['status'] = request.get_json()
return Response(status=200)
server = ServerThread(app)
try:
server.start()
j = self.batch.create_job(
'alpine',
['echo', 'test'],
attributes={'foo': 'bar'},
callback=server.url_for('/test'))
j.wait()
status = d['status']
self.assertEqual(status['state'], 'Complete')
self.assertEqual(status['attributes'], {'foo': 'bar'})
finally:
server.shutdown()
server.join()
|
[
"flask.Flask",
"time.sleep",
"os.environ.get",
"re.search",
"flask.Response",
"flask.request.get_json"
] |
[((4713, 4733), 'flask.Flask', 'Flask', (['"""test-client"""'], {}), "('test-client')\n", (4718, 4733), False, 'from flask import Flask, Response, request\n'), ((1358, 1371), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (1368, 1371), False, 'import time\n'), ((4843, 4861), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4859, 4861), False, 'from flask import Flask, Response, request\n'), ((4881, 4901), 'flask.Response', 'Response', ([], {'status': '(200)'}), '(status=200)\n', (4889, 4901), False, 'from flask import Flask, Response, request\n'), ((264, 291), 'os.environ.get', 'os.environ.get', (['"""BATCH_URL"""'], {}), "('BATCH_URL')\n", (278, 291), False, 'import os\n'), ((994, 1070), 're.search', 're.search', (['""".*invalid request: batch_id [0-9]+ is closed"""', 'err.response.text'], {}), "('.*invalid request: batch_id [0-9]+ is closed', err.response.text)\n", (1003, 1070), False, 'import re\n')]
|
import logging
from pkg_resources import iter_entry_points
import click
import click_plugins
from . import options
from .. import __version__
_context = dict(
token_normalize_func=lambda x: x.lower(),
help_option_names=['--help', '-h'],
auto_envvar_prefix='TILEZILLA'
)
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(message)s'
LOG_DATE_FORMAT = '%H:%M:%S'
@click_plugins.with_plugins(ep for ep in
iter_entry_points('tilez.commands'))
@click.group(help='tilezilla command line interface',
context_settings=_context)
@options.opt_config_file
@click.option('--verbose', '-v', count=True, help='Be louder')
@click.option('--quiet', '-q', count=True, help='Be quieter')
@click.version_option(__version__)
@click.pass_context
def cli(ctx, config_file, verbose, quiet):
verbosity = verbose - quiet
log_level = 20 - 10 * verbosity
# Logging config for tilez
logger = logging.getLogger('tilez')
formatter = logging.Formatter(LOG_FORMAT, LOG_DATE_FORMAT)
handler = logging.StreamHandler(click.get_text_stream('stdout'))
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(max(10, log_level)) # never below DEBUG (10)
# Logging for main module
main_logger = logging.getLogger('tilezilla')
    if log_level <= 0:  # log_level=NOTSET (0) sets main logger to debug
main_logger.setLevel(logging.DEBUG)
# Parse config
ctx.obj = ctx.obj or {}
if config_file:
from ..config import parse_config
ctx.obj['config'] = parse_config(config_file)
|
[
"click.version_option",
"click.option",
"click.get_text_stream",
"logging.Formatter",
"click.group",
"pkg_resources.iter_entry_points",
"logging.getLogger"
] |
[((477, 556), 'click.group', 'click.group', ([], {'help': '"""tilezilla command line interface"""', 'context_settings': '_context'}), "(help='tilezilla command line interface', context_settings=_context)\n", (488, 556), False, 'import click\n'), ((596, 657), 'click.option', 'click.option', (['"""--verbose"""', '"""-v"""'], {'count': '(True)', 'help': '"""Be louder"""'}), "('--verbose', '-v', count=True, help='Be louder')\n", (608, 657), False, 'import click\n'), ((659, 719), 'click.option', 'click.option', (['"""--quiet"""', '"""-q"""'], {'count': '(True)', 'help': '"""Be quieter"""'}), "('--quiet', '-q', count=True, help='Be quieter')\n", (671, 719), False, 'import click\n'), ((721, 754), 'click.version_option', 'click.version_option', (['__version__'], {}), '(__version__)\n', (741, 754), False, 'import click\n'), ((931, 957), 'logging.getLogger', 'logging.getLogger', (['"""tilez"""'], {}), "('tilez')\n", (948, 957), False, 'import logging\n'), ((974, 1020), 'logging.Formatter', 'logging.Formatter', (['LOG_FORMAT', 'LOG_DATE_FORMAT'], {}), '(LOG_FORMAT, LOG_DATE_FORMAT)\n', (991, 1020), False, 'import logging\n'), ((1273, 1303), 'logging.getLogger', 'logging.getLogger', (['"""tilezilla"""'], {}), "('tilezilla')\n", (1290, 1303), False, 'import logging\n'), ((1057, 1088), 'click.get_text_stream', 'click.get_text_stream', (['"""stdout"""'], {}), "('stdout')\n", (1078, 1088), False, 'import click\n'), ((439, 474), 'pkg_resources.iter_entry_points', 'iter_entry_points', (['"""tilez.commands"""'], {}), "('tilez.commands')\n", (456, 474), False, 'from pkg_resources import iter_entry_points\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/datasets/datasets.retailrocket.ipynb (unless otherwise specified).
__all__ = ['RetailRocketDataset', 'RetailRocketDatasetv2']
# Cell
from typing import List, Optional, Callable, Union, Any, Tuple
import os
import os.path as osp
from collections.abc import Sequence
import sys
import numpy as np
import pandas as pd
from datetime import timezone, datetime, timedelta
import time
from ..utils.common_utils import download_url, extract_zip, makedirs
from .bases.common import Dataset
from .bases.session_graph import SessionGraphDataset
# Cell
class RetailRocketDataset(SessionGraphDataset):
train_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/train.txt"
test_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/test.txt"
all_train_seq_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/all_train_seq.txt"
def __init__(self, root, shuffle=False, n_node=40727, is_train=True):
self.n_node = n_node
self.shuffle = shuffle
self.is_train = is_train
super().__init__(root, shuffle, n_node)
@property
def raw_file_names(self) -> str:
if self.is_train:
return ['train.txt', 'all_train_seq.txt']
return ['test.txt', 'all_train_seq.txt']
def download(self):
download_url(self.all_train_seq_url, self.raw_dir)
if self.is_train:
download_url(self.train_url, self.raw_dir)
else:
download_url(self.test_url, self.raw_dir)
# Internal Cell
def to_list(value: Any) -> Sequence:
if isinstance(value, Sequence) and not isinstance(value, str):
return value
else:
return [value]
def files_exist(files: List[str]) -> bool:
# NOTE: We return `False` in case `files` is empty, leading to a
# re-processing of files on every instantiation.
return len(files) != 0 and all([osp.exists(f) for f in files])
# Cell
class RetailRocketDatasetv2(Dataset):
r"""Load and process RetailRocket dataset.
Args:
root (string): Root directory where the dataset should be saved.
process_method (string):
last: last day => test set
last_min_date: last day => test set, but from a minimal date onwards
days_test: last N days => test set
slice: create multiple train-test-combinations with a sliding window approach
min_date (string, optional): Minimum date
session_length (int, optional): Session time length :default = 30 * 60 #30 minutes
min_session_length (int, optional): Minimum number of items for a session to be valid
min_item_support (int, optional): Minimum number of interactions for an item to be valid
        num_slices (int, optional): Number of train/test slices to create
        days_offset (int, optional): Offset in days from the first date in the data set
        days_shift (int, optional): Number of days the training start date is shifted after creating one slice
days_train (int, optional): Days in train set in each slice
days_test (int, optional): Days in test set in each slice
"""
url = 'https://github.com/RecoHut-Datasets/retail_rocket/raw/v2/retailrocket.zip'
def __init__(self, root, process_method, min_date='2015-09-02',
session_length=30*60, min_session_length=2, min_item_support=5,
num_slices=5, days_offset=0, days_shift=27, days_train=25, days_test=2):
super().__init__(root)
self.process_method = process_method
self.min_date = min_date
self.session_length = session_length
self.min_session_length = min_session_length
self.min_item_support = min_item_support
self.num_slices = num_slices
self.days_offset = days_offset
self.days_shift = days_shift
self.days_train = days_train
self.days_test = days_test
self.data = None
self.cart = None
self._process()
@property
def raw_file_names(self) -> str:
return 'events.csv'
@property
def processed_file_names(self) -> str:
return 'data.pkl'
def download(self):
path = download_url(self.url, self.raw_dir)
extract_zip(path, self.raw_dir)
from shutil import move, rmtree
move(osp.join(self.raw_dir, 'retailrocket', 'events.csv'),
osp.join(self.raw_dir, 'events.csv'))
rmtree(osp.join(self.raw_dir, 'retailrocket'))
os.unlink(path)
def load(self):
#load csv
data = pd.read_csv(osp.join(self.raw_dir,self.raw_file_names), sep=',',
header=0, usecols=[0,1,2,3],
dtype={0:np.int64, 1:np.int32, 2:str, 3:np.int32})
#specify header names
data.columns = ['Time','UserId','Type','ItemId']
data['Time'] = (data.Time / 1000).astype(int)
data.sort_values(['UserId','Time'], ascending=True, inplace=True)
#sessionize
data['TimeTmp'] = pd.to_datetime(data.Time, unit='s')
data.sort_values(['UserId','TimeTmp'], ascending=True, inplace=True)
data['TimeShift'] = data['TimeTmp'].shift(1)
data['TimeDiff'] = (data['TimeTmp'] - data['TimeShift']).dt.total_seconds().abs()
data['SessionIdTmp'] = (data['TimeDiff'] > self.session_length).astype(int)
data['SessionId'] = data['SessionIdTmp'].cumsum( skipna=False)
del data['SessionIdTmp'], data['TimeShift'], data['TimeDiff']
data.sort_values(['SessionId','Time'], ascending=True, inplace=True)
cart = data[data.Type == 'addtocart']
data = data[data.Type == 'view']
del data['Type']
# output
print(data.Time.min())
print(data.Time.max())
data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc)
del data['TimeTmp']
print('Loaded data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
self.cart = cart
def filter_data(self):
data = self.data
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
#filter item support
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>= self.min_item_support].index)]
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>= self.min_session_length].index)]
#output
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
def filter_min_date(self):
data = self.data
min_datetime = datetime.strptime(self.min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')
#filter
session_max_times = data.groupby('SessionId').Time.max()
session_keep = session_max_times[session_max_times > min_datetime.timestamp()].index
data = data[np.in1d(data.SessionId, session_keep)]
#output
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
def split_data_org(self):
data = self.data
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(osp.join(self.processed_dir,'events_train_full.txt'), sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(osp.join(self.processed_dir,'events_test.txt'), sep='\t', index=False)
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(osp.join(self.processed_dir,'events_train_tr.txt'), sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(osp.join(self.processed_dir,'events_train_valid.txt'), sep='\t', index=False)
def split_data(self):
data = self.data
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
test_from = data_end - timedelta(self.days_test)
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < test_from.timestamp()].index
session_test = session_max_times[session_max_times >= test_from.timestamp()].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(osp.join(self.processed_dir,'events_train_full.txt'), sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(osp.join(self.processed_dir,'events_test.txt'), sep='\t', index=False)
def slice_data(self):
for slice_id in range(0, self.num_slices):
self.split_data_slice(slice_id, self.days_offset+(slice_id*self.days_shift))
def split_data_slice(self, slice_id, days_offset):
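        # carve one slice out of the data: days_train days for training followed by days_test days for testing, starting days_offset days after the first event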
data = self.data
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Full data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format(slice_id, len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.isoformat(), data_end.isoformat()))
start = datetime.fromtimestamp(data.Time.min(), timezone.utc ) + timedelta(days_offset)
middle = start + timedelta(self.days_train)
end = middle + timedelta(self.days_test)
#prefilter the timespan
session_max_times = data.groupby('SessionId').Time.max()
greater_start = session_max_times[session_max_times >= start.timestamp()].index
lower_end = session_max_times[session_max_times <= end.timestamp()].index
data_filtered = data[np.in1d(data.SessionId, greater_start.intersection(lower_end))]
print('Slice data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} / {}'.
format( slice_id, len(data_filtered), data_filtered.SessionId.nunique(), data_filtered.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat(), end.date().isoformat() ) )
#split to train and test
session_max_times = data_filtered.groupby('SessionId').Time.max()
sessions_train = session_max_times[session_max_times < middle.timestamp()].index
sessions_test = session_max_times[session_max_times >= middle.timestamp()].index
train = data[np.in1d(data.SessionId, sessions_train)]
print('Train set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format( slice_id, len(train), train.SessionId.nunique(), train.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat() ) )
train.to_csv(osp.join(self.processed_dir,'events_train_full.'+str(slice_id)+'.txt'), sep='\t', index=False)
test = data[np.in1d(data.SessionId, sessions_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Test set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} \n\n'.
format( slice_id, len(test), test.SessionId.nunique(), test.ItemId.nunique(), middle.date().isoformat(), end.date().isoformat() ) )
test.to_csv(osp.join(self.processed_dir,'events_test.'+str(slice_id)+'.txt'), sep='\t', index=False)
def store_buys(self):
self.cart.to_csv(osp.join(self.processed_dir,'events_buys.txt'), sep='\t', index=False)
def process(self):
self.load()
self.filter_data()
if self.process_method == 'last':
self.split_data_org()
elif self.process_method == 'last_min_date':
self.filter_min_date()
self.split_data_org()
elif self.process_method == 'days_test':
self.split_data()
elif self.process_method == 'slice':
self.slice_data()
self.store_buys()
|
[
"os.unlink",
"os.path.exists",
"datetime.datetime.strptime",
"pandas.to_datetime",
"datetime.timedelta",
"os.path.join",
"numpy.in1d"
] |
[((4482, 4497), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (4491, 4497), False, 'import os\n'), ((5013, 5048), 'pandas.to_datetime', 'pd.to_datetime', (['data.Time'], {'unit': '"""s"""'}), "(data.Time, unit='s')\n", (5027, 5048), True, 'import pandas as pd\n'), ((7368, 7435), 'datetime.datetime.strptime', 'datetime.strptime', (["(self.min_date + ' 00:00:00')", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(self.min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (7385, 7435), False, 'from datetime import timezone, datetime, timedelta\n'), ((4314, 4366), 'os.path.join', 'osp.join', (['self.raw_dir', '"""retailrocket"""', '"""events.csv"""'], {}), "(self.raw_dir, 'retailrocket', 'events.csv')\n", (4322, 4366), True, 'import os.path as osp\n'), ((4381, 4417), 'os.path.join', 'osp.join', (['self.raw_dir', '"""events.csv"""'], {}), "(self.raw_dir, 'events.csv')\n", (4389, 4417), True, 'import os.path as osp\n'), ((4434, 4472), 'os.path.join', 'osp.join', (['self.raw_dir', '"""retailrocket"""'], {}), "(self.raw_dir, 'retailrocket')\n", (4442, 4472), True, 'import os.path as osp\n'), ((4564, 4607), 'os.path.join', 'osp.join', (['self.raw_dir', 'self.raw_file_names'], {}), '(self.raw_dir, self.raw_file_names)\n', (4572, 4607), True, 'import os.path as osp\n'), ((6399, 6466), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_lengths[session_lengths > 1].index'], {}), '(data.SessionId, session_lengths[session_lengths > 1].index)\n', (6406, 6466), True, 'import numpy as np\n'), ((6570, 6656), 'numpy.in1d', 'np.in1d', (['data.ItemId', 'item_supports[item_supports >= self.min_item_support].index'], {}), '(data.ItemId, item_supports[item_supports >= self.min_item_support].\n index)\n', (6577, 6656), True, 'import numpy as np\n'), ((6763, 6858), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_lengths[session_lengths >= self.min_session_length].index'], {}), '(data.SessionId, session_lengths[session_lengths >= self.\n min_session_length].index)\n', (6770, 6858), True, 'import numpy as np\n'), ((7632, 7669), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_keep'], {}), '(data.SessionId, session_keep)\n', (7639, 7669), True, 'import numpy as np\n'), ((8437, 8475), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_train'], {}), '(data.SessionId, session_train)\n', (8444, 8475), True, 'import numpy as np\n'), ((8497, 8534), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_test'], {}), '(data.SessionId, session_test)\n', (8504, 8534), True, 'import numpy as np\n'), ((8556, 8590), 'numpy.in1d', 'np.in1d', (['test.ItemId', 'train.ItemId'], {}), '(test.ItemId, train.ItemId)\n', (8563, 8590), True, 'import numpy as np\n'), ((8664, 8718), 'numpy.in1d', 'np.in1d', (['test.SessionId', 'tslength[tslength >= 2].index'], {}), '(test.SessionId, tslength[tslength >= 2].index)\n', (8671, 8718), True, 'import numpy as np\n'), ((8884, 8937), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_full.txt"""'], {}), "(self.processed_dir, 'events_train_full.txt')\n", (8892, 8937), True, 'import os.path as osp\n'), ((9117, 9164), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_test.txt"""'], {}), "(self.processed_dir, 'events_test.txt')\n", (9125, 9164), True, 'import os.path as osp\n'), ((9473, 9512), 'numpy.in1d', 'np.in1d', (['train.SessionId', 'session_train'], {}), '(train.SessionId, session_train)\n', (9480, 9512), True, 'import numpy as np\n'), ((9536, 9575), 'numpy.in1d', 'np.in1d', (['train.SessionId', 'session_valid'], {}), '(train.SessionId, session_valid)\n', (9543, 
9575), True, 'import numpy as np\n'), ((9599, 9637), 'numpy.in1d', 'np.in1d', (['valid.ItemId', 'train_tr.ItemId'], {}), '(valid.ItemId, train_tr.ItemId)\n', (9606, 9637), True, 'import numpy as np\n'), ((9714, 9769), 'numpy.in1d', 'np.in1d', (['valid.SessionId', 'tslength[tslength >= 2].index'], {}), '(valid.SessionId, tslength[tslength >= 2].index)\n', (9721, 9769), True, 'import numpy as np\n'), ((9942, 9993), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_tr.txt"""'], {}), "(self.processed_dir, 'events_train_tr.txt')\n", (9950, 9993), True, 'import os.path as osp\n'), ((10183, 10237), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_valid.txt"""'], {}), "(self.processed_dir, 'events_train_valid.txt')\n", (10191, 10237), True, 'import os.path as osp\n'), ((10417, 10442), 'datetime.timedelta', 'timedelta', (['self.days_test'], {}), '(self.days_test)\n', (10426, 10442), False, 'from datetime import timezone, datetime, timedelta\n'), ((10712, 10750), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_train'], {}), '(data.SessionId, session_train)\n', (10719, 10750), True, 'import numpy as np\n'), ((10772, 10809), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_test'], {}), '(data.SessionId, session_test)\n', (10779, 10809), True, 'import numpy as np\n'), ((10831, 10865), 'numpy.in1d', 'np.in1d', (['test.ItemId', 'train.ItemId'], {}), '(test.ItemId, train.ItemId)\n', (10838, 10865), True, 'import numpy as np\n'), ((10939, 10993), 'numpy.in1d', 'np.in1d', (['test.SessionId', 'tslength[tslength >= 2].index'], {}), '(test.SessionId, tslength[tslength >= 2].index)\n', (10946, 10993), True, 'import numpy as np\n'), ((11159, 11212), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_full.txt"""'], {}), "(self.processed_dir, 'events_train_full.txt')\n", (11167, 11212), True, 'import os.path as osp\n'), ((11392, 11439), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_test.txt"""'], {}), "(self.processed_dir, 'events_test.txt')\n", (11400, 11439), True, 'import os.path as osp\n'), ((12164, 12186), 'datetime.timedelta', 'timedelta', (['days_offset'], {}), '(days_offset)\n', (12173, 12186), False, 'from datetime import timezone, datetime, timedelta\n'), ((12213, 12239), 'datetime.timedelta', 'timedelta', (['self.days_train'], {}), '(self.days_train)\n', (12222, 12239), False, 'from datetime import timezone, datetime, timedelta\n'), ((12264, 12289), 'datetime.timedelta', 'timedelta', (['self.days_test'], {}), '(self.days_test)\n', (12273, 12289), False, 'from datetime import timezone, datetime, timedelta\n'), ((13257, 13296), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'sessions_train'], {}), '(data.SessionId, sessions_train)\n', (13264, 13296), True, 'import numpy as np\n'), ((13676, 13714), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'sessions_test'], {}), '(data.SessionId, sessions_test)\n', (13683, 13714), True, 'import numpy as np\n'), ((13736, 13770), 'numpy.in1d', 'np.in1d', (['test.ItemId', 'train.ItemId'], {}), '(test.ItemId, train.ItemId)\n', (13743, 13770), True, 'import numpy as np\n'), ((13845, 13899), 'numpy.in1d', 'np.in1d', (['test.SessionId', 'tslength[tslength >= 2].index'], {}), '(test.SessionId, tslength[tslength >= 2].index)\n', (13852, 13899), True, 'import numpy as np\n'), ((14300, 14347), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_buys.txt"""'], {}), "(self.processed_dir, 'events_buys.txt')\n", (14308, 14347), True, 'import os.path as osp\n'), ((1919, 1932), 
'os.path.exists', 'osp.exists', (['f'], {}), '(f)\n', (1929, 1932), True, 'import os.path as osp\n')]
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load_module, reload_module, get_stdout, sanitize
exercise = 'src.calculator'
def parse_result(output):
if len(output) > 30:
return output[:30] + "..."
else:
return output
# add, multiply, subtract (-)
@points('1.calculator')
class CalculatorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', return_value = '0'):
cls.module = load_module(exercise, 'en')
def test_add1(self):
with patch('builtins.input', side_effect = [ '1', '2', 'add', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '1 + 2 = 3'
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 1, 2, add")
self.assertTrue(expect in output, f"With inputs 1, 2, add your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_add2(self):
with patch('builtins.input', side_effect = [ '75', '23', 'add', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 75, 23, add")
expect = '75 + 23 = 98'
self.assertTrue(expect in output, f"With inputs 75, 23, add your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_subtract1(self):
with patch('builtins.input', side_effect = [ '2', '1', 'subtract', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 2, 1, subtract")
expect = '2 - 1 = 1'
self.assertTrue(expect in output, f"With inputs 2, 1, subtract your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_subtract2(self):
with patch('builtins.input', side_effect = [ '13', '34', 'subtract', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '13 - 34 = -21'
self.assertTrue(expect in output, f"With inputs 13, 34, subtract your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_multiply1(self):
with patch('builtins.input', side_effect = [ '2', '3', 'multiply', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '2 * 3 = 6'
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 2, 3, multiply")
self.assertTrue(expect in output, f"With inputs 2, 3, multiply your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_multiply2(self):
with patch('builtins.input', side_effect = [ '27', '-3', 'multiply', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '27 * -3 = -81'
self.assertTrue(expect in output, f"With inputs 27, -3, multiply your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_xcrap(self):
with patch('builtins.input', side_effect = [ '27', '-3', 'quotient', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
self.assertTrue(len(output) == 0, f"With inputs 27, -3, quotient your program should not print out anything\nYour program printed out:\n{output}")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"tmc.utils.get_stdout",
"unittest.mock.patch",
"tmc.utils.reload_module",
"tmc.points",
"tmc.utils.load_module"
] |
[((319, 341), 'tmc.points', 'points', (['"""1.calculator"""'], {}), "('1.calculator')\n", (325, 341), False, 'from tmc import points\n'), ((4027, 4042), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4040, 4042), False, 'import unittest\n'), ((438, 479), 'unittest.mock.patch', 'patch', (['"""builtins.input"""'], {'return_value': '"""0"""'}), "('builtins.input', return_value='0')\n", (443, 479), False, 'from unittest.mock import patch\n'), ((508, 535), 'tmc.utils.load_module', 'load_module', (['exercise', '"""en"""'], {}), "(exercise, 'en')\n", (519, 535), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((707, 733), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (720, 733), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((755, 767), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (765, 767), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((1239, 1265), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (1252, 1265), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((1287, 1299), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (1297, 1299), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((1786, 1812), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (1799, 1812), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((1834, 1846), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (1844, 1846), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((2338, 2364), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (2351, 2364), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((2386, 2398), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (2396, 2398), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((2780, 2806), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (2793, 2806), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((2828, 2840), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (2838, 2840), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((3335, 3361), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (3348, 3361), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((3383, 3395), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (3393, 3395), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((3775, 3801), 'tmc.utils.reload_module', 'reload_module', (['self.module'], {}), '(self.module)\n', (3788, 3801), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n'), ((3823, 3835), 'tmc.utils.get_stdout', 'get_stdout', ([], {}), '()\n', (3833, 3835), False, 'from tmc.utils import load_module, reload_module, get_stdout, sanitize\n')]
|
from django.contrib import admin
from inventory.models import Group, Host, Data
# Register your models here.
admin.site.register(Host)
admin.site.register(Group)
admin.site.register(Data)
|
[
"django.contrib.admin.site.register"
] |
[((110, 135), 'django.contrib.admin.site.register', 'admin.site.register', (['Host'], {}), '(Host)\n', (129, 135), False, 'from django.contrib import admin\n'), ((136, 162), 'django.contrib.admin.site.register', 'admin.site.register', (['Group'], {}), '(Group)\n', (155, 162), False, 'from django.contrib import admin\n'), ((163, 188), 'django.contrib.admin.site.register', 'admin.site.register', (['Data'], {}), '(Data)\n', (182, 188), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.StandardVoucherOpenApiVO import StandardVoucherOpenApiVO
class AlipayBossFncGffundStandardvoucherBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayBossFncGffundStandardvoucherBatchqueryResponse, self).__init__()
self._cur_page = None
self._page_size = None
self._standard_voucher_list = None
self._success = None
self._total_items = None
self._total_pages = None
@property
def cur_page(self):
return self._cur_page
@cur_page.setter
def cur_page(self, value):
self._cur_page = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def standard_voucher_list(self):
return self._standard_voucher_list
@standard_voucher_list.setter
def standard_voucher_list(self, value):
if isinstance(value, list):
self._standard_voucher_list = list()
for i in value:
if isinstance(i, StandardVoucherOpenApiVO):
self._standard_voucher_list.append(i)
else:
self._standard_voucher_list.append(StandardVoucherOpenApiVO.from_alipay_dict(i))
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
@property
def total_items(self):
return self._total_items
@total_items.setter
def total_items(self, value):
self._total_items = value
@property
def total_pages(self):
return self._total_pages
@total_pages.setter
def total_pages(self, value):
self._total_pages = value
def parse_response_content(self, response_content):
response = super(AlipayBossFncGffundStandardvoucherBatchqueryResponse, self).parse_response_content(response_content)
if 'cur_page' in response:
self.cur_page = response['cur_page']
if 'page_size' in response:
self.page_size = response['page_size']
if 'standard_voucher_list' in response:
self.standard_voucher_list = response['standard_voucher_list']
if 'success' in response:
self.success = response['success']
if 'total_items' in response:
self.total_items = response['total_items']
if 'total_pages' in response:
self.total_pages = response['total_pages']
|
[
"alipay.aop.api.domain.StandardVoucherOpenApiVO.StandardVoucherOpenApiVO.from_alipay_dict"
] |
[((1387, 1431), 'alipay.aop.api.domain.StandardVoucherOpenApiVO.StandardVoucherOpenApiVO.from_alipay_dict', 'StandardVoucherOpenApiVO.from_alipay_dict', (['i'], {}), '(i)\n', (1428, 1431), False, 'from alipay.aop.api.domain.StandardVoucherOpenApiVO import StandardVoucherOpenApiVO\n')]
|
import pandas as pd
def deal():
    # list
company_name_list = ['12312', '141', '515', '41']
    # convert the list to a DataFrame
df = pd.DataFrame(company_name_list)
    # save to a local CSV file
df.to_csv("company_name_li.csv", index=False)
if __name__ == '__main__':
deal()
|
[
"pandas.DataFrame"
] |
[((128, 159), 'pandas.DataFrame', 'pd.DataFrame', (['company_name_list'], {}), '(company_name_list)\n', (140, 159), True, 'import pandas as pd\n')]
|
import os
import re
import pathlib
from setuptools import setup, find_packages
def get_long_description() -> str:
"""Converts relative repository links to absolute URLs
if GITHUB_REPOSITORY and GITHUB_SHA environment variables exist.
If not, it returns the raw content in README.md.
"""
raw_readme = pathlib.Path("README.md").read_text()
repository = os.environ.get("GITHUB_REPOSITORY")
sha = os.environ.get("GITHUB_SHA")
if repository is not None and sha is not None:
full_url = f"https://github.com/{repository}/blob/{sha}/"
return re.sub(r"]\((?!https)", "](" + full_url, raw_readme)
return raw_readme
# pylint: disable=line-too-long
setup(
name="webviz-dev-sync",
description="Developer tool for syncing webviz packages",
long_description=get_long_description(),
long_description_content_type="text/markdown",
url="https://github.com/equinor/webviz-dev-sync",
author="<NAME>",
packages=find_packages(exclude=["tests"]),
package_data={
"webviz_dev_sync": [
"py.typed",
]
},
entry_points={
"console_scripts": ["webviz-dev=webviz_dev_sync.command_line:main"],
},
install_requires=[
"gitpython>=3.1.18",
"jsonschema>=4.0.0",
"Pillow>=7.0.0",
"progress>=1.6",
"PyGithub>=1.55",
"pysimplegui>=4.55.1",
"pyyaml>=5.4.1",
"types-PyYAML>=5.4.1",
],
setup_requires=["setuptools_scm~=3.2"],
python_requires="~=3.6",
use_scm_version=True,
zip_safe=False,
project_urls={
"Documentation": "https://equinor.github.io/webviz-dev-sync",
"Download": "https://equinor.github.io/webviz-dev-sync",
"Source": "https://equinor.github.io/webviz-dev-sync",
"Tracker": "https://equinor.github.io/webviz-dev-sync/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Natural Language :: English",
"Environment :: Web Environment",
"Topic :: Multimedia :: Graphics",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: MIT License",
],
)
|
[
"os.environ.get",
"pathlib.Path",
"re.sub",
"setuptools.find_packages"
] |
[((380, 415), 'os.environ.get', 'os.environ.get', (['"""GITHUB_REPOSITORY"""'], {}), "('GITHUB_REPOSITORY')\n", (394, 415), False, 'import os\n'), ((426, 454), 'os.environ.get', 'os.environ.get', (['"""GITHUB_SHA"""'], {}), "('GITHUB_SHA')\n", (440, 454), False, 'import os\n'), ((588, 640), 're.sub', 're.sub', (['"""]\\\\((?!https)"""', "('](' + full_url)", 'raw_readme'], {}), "(']\\\\((?!https)', '](' + full_url, raw_readme)\n", (594, 640), False, 'import re\n'), ((978, 1010), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (991, 1010), False, 'from setuptools import setup, find_packages\n'), ((324, 349), 'pathlib.Path', 'pathlib.Path', (['"""README.md"""'], {}), "('README.md')\n", (336, 349), False, 'import pathlib\n')]
|
# coding:utf-8
# @Time : 2019/5/15
# @Author : xuyouze
# @File Name : base_model.py
import importlib
import os
from abc import ABC, abstractmethod
from collections import OrderedDict
import torch
from torch import nn
from config.base_config import BaseConfig
from networks import *
class BaseModel(ABC):
def __init__(self, config: BaseConfig) -> None:
super().__init__()
self.config = config
self.net_names = []
self.optimizers = []
self.schedulers = []
self.save_path = config.checkpoints_dir
self.correct = None
self.output = None
self.attr = None
self.validate_size = None
self.pos_num = None
self.tnr = None
self.tpr = None
@abstractmethod
def set_input(self, x):
pass
@abstractmethod
def forward(self):
pass
def optimize_parameters(self):
self.forward()
for optimizer in self.optimizers:
optimizer.zero_grad()
self.backward()
for optimizer in self.optimizers:
optimizer.step()
def backward(self):
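        # look up each head's criterion, output and target tensors by name, compute the loss and backpropagate it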
for name in self.net_names:
setattr(self, "loss_%s" % name,
getattr(self, "criterion_%s" % name)(getattr(self, "output_%s" % name),
getattr(self,
"attr_%s" % name)).cuda())
getattr(self, "loss_%s" % name).backward()
def setup(self):
"""
setup the network
if Train:
set the optimizer
else:
load the pre-training models
:return:
"""
print('-----------------------------------------------')
if self.config.isTrain:
self.schedulers = [get_scheduler(optimizer, self.config) for optimizer in self.optimizers]
if not self.config.isTrain or self.config.continue_train:
load_prefix = "iter_%d" % self.config.load_iter if self.config.load_iter > 0 else self.config.last_epoch
self.load_networks(load_prefix)
self.print_networks()
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
def get_current_loss(self):
errors_map = OrderedDict()
for name in self.net_names:
if isinstance(name, str):
errors_map[name] = float(getattr(self, "loss_" + name))
return errors_map
def eval(self):
for name in self.net_names:
if isinstance(name, str):
net = getattr(self, "net_" + name)
net.eval()
def train(self):
for name in self.net_names:
if isinstance(name, str):
net = getattr(self, "net_" + name)
net.train()
def save_networks(self, epoch_prefix):
for name in self.net_names:
if isinstance(name, str):
save_filename = "%s_net_%s.pth" % (epoch_prefix, name)
save_path = os.path.join(self.save_path, save_filename)
net = getattr(self, "net_" + name)
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda()
def load_networks(self, epoch_prefix):
for name in self.net_names:
if isinstance(name, str):
load_filename = "%s_net_%s.pth" % (epoch_prefix, name)
load_path = os.path.join(self.save_path, load_filename)
net = getattr(self, "net_" + name)
if isinstance(net, nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
state_dict = torch.load(load_path)
net.load_state_dict(state_dict)
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def print_networks(self):
for name in self.net_names:
if isinstance(name, str):
net = getattr(self, "net_" + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
self.config.logger.info('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
def get_learning_rate(self):
return self.optimizers[0].param_groups[0]["lr"]
def test(self):
with torch.no_grad():
self.forward()
self.output = torch.zeros(self.attr.size(0), self.config.dataset_config.attribute_num)
for name in self.net_names:
self.output[:, getattr(self, "attr_%s_index" % name)] = getattr(self, "output_%s" % name).cpu()
com1 = self.output > 0.5
com2 = self.attr > 0
# class_balance accuracy
accuracy = com1.eq(com2)
self.pos_num.add_(com2.sum(0).float())
tpr = (accuracy & (com2 > 0)).sum(0).float()
tnr = (accuracy & (com2 < 1)).sum(0).float()
self.tpr.add_(tpr)
self.tnr.add_(tnr)
# mean accuracy
mean_accuracy = accuracy.sum(0).float()
self.correct.add_(mean_accuracy)
def get_model_precision(self):
return self.correct / self.validate_size
def get_model_class_balance_precision(self):
return 1 / 2 * (self.tpr / self.pos_num + self.tnr / (self.get_validate_size() - self.pos_num))
def clear_precision(self):
self.correct = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
self.tpr = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
self.tnr = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
self.pos_num = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
def create_network_model(self):
return create_network_model(self.config)
def set_validate_size(self, validate_size: int):
self.validate_size = validate_size
def get_validate_size(self):
if self.validate_size:
return self.validate_size
else:
return 0
|
[
"torch.load",
"torch.FloatTensor",
"collections.OrderedDict",
"torch.no_grad",
"os.path.join"
] |
[((1919, 1932), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1930, 1932), False, 'from collections import OrderedDict\n'), ((3766, 3781), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3779, 3781), False, 'import torch\n'), ((2512, 2555), 'os.path.join', 'os.path.join', (['self.save_path', 'save_filename'], {}), '(self.save_path, save_filename)\n', (2524, 2555), False, 'import os\n'), ((2842, 2885), 'os.path.join', 'os.path.join', (['self.save_path', 'load_filename'], {}), '(self.save_path, load_filename)\n', (2854, 2885), False, 'import os\n'), ((3056, 3077), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (3066, 3077), False, 'import torch\n'), ((4678, 4737), 'torch.FloatTensor', 'torch.FloatTensor', (['self.config.dataset_config.attribute_num'], {}), '(self.config.dataset_config.attribute_num)\n', (4695, 4737), False, 'import torch\n'), ((4760, 4819), 'torch.FloatTensor', 'torch.FloatTensor', (['self.config.dataset_config.attribute_num'], {}), '(self.config.dataset_config.attribute_num)\n', (4777, 4819), False, 'import torch\n'), ((4842, 4901), 'torch.FloatTensor', 'torch.FloatTensor', (['self.config.dataset_config.attribute_num'], {}), '(self.config.dataset_config.attribute_num)\n', (4859, 4901), False, 'import torch\n'), ((4928, 4987), 'torch.FloatTensor', 'torch.FloatTensor', (['self.config.dataset_config.attribute_num'], {}), '(self.config.dataset_config.attribute_num)\n', (4945, 4987), False, 'import torch\n')]
|
"""This module contains functions for converting LaTeX and Markdown files"""
import string
import random
import os
import multiprocessing
import time
from markdown import markdown
import pdfkit
MAX_WAIT_TIME = 3
POLLING_RATE = 10
def try_create_tempdir():
os.makedirs(os.getcwd() + "/TEMP", exist_ok=True)
def generate_random_name(size=120, alphabet=string.ascii_letters):
"""This fuction generates a random name from file. The length and alphabet can be changed"""
return ''.join(random.choice(alphabet) for _ in range(size))
def render_markdown(code):
"""Returns path to generated PDF or None"""
filename = 'TEMP/' + generate_random_name() + '.pdf'
try:
try_create_tempdir()
html_text = markdown(code, output_format='html4')
pdfkit.from_string(html_text, filename)
except Exception:
return None
return filename
def __run_pdflatex(code, send_end):
"""Sets path to generated PDF or None"""
filename = 'TEMP/' + generate_random_name()
tex_file_path = filename + '.tex'
try_create_tempdir()
with open(tex_file_path, 'w') as tex_file:
tex_file.write(code)
try:
command = f'pdflatex -output-directory=TEMP {tex_file_path} > /dev/null'
os.system(command)
send_end.send(filename + '.pdf')
except Exception:
send_end.send(None)
def render_latex(code):
"""Returns path to generated PDF or None"""
pdf_file_path = None
recv_end, send_end = multiprocessing.Pipe(False)
proc = multiprocessing.Process(
target=__run_pdflatex, args=(code, send_end))
proc.start()
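    # Poll the worker for up to MAX_WAIT_TIME seconds; if pdflatex hangs, kill the process and return None.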
for _ in range(POLLING_RATE * MAX_WAIT_TIME):
if proc.is_alive():
time.sleep(1 / POLLING_RATE)
else:
pdf_file_path = recv_end.recv()
break
if proc.is_alive():
proc.kill()
proc.join()
return pdf_file_path
|
[
"os.getcwd",
"os.system",
"random.choice",
"markdown.markdown",
"time.sleep",
"multiprocessing.Pipe",
"pdfkit.from_string",
"multiprocessing.Process"
] |
[((1493, 1520), 'multiprocessing.Pipe', 'multiprocessing.Pipe', (['(False)'], {}), '(False)\n', (1513, 1520), False, 'import multiprocessing\n'), ((1532, 1601), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': '__run_pdflatex', 'args': '(code, send_end)'}), '(target=__run_pdflatex, args=(code, send_end))\n', (1555, 1601), False, 'import multiprocessing\n'), ((739, 776), 'markdown.markdown', 'markdown', (['code'], {'output_format': '"""html4"""'}), "(code, output_format='html4')\n", (747, 776), False, 'from markdown import markdown\n'), ((785, 824), 'pdfkit.from_string', 'pdfkit.from_string', (['html_text', 'filename'], {}), '(html_text, filename)\n', (803, 824), False, 'import pdfkit\n'), ((1259, 1277), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1268, 1277), False, 'import os\n'), ((277, 288), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (286, 288), False, 'import os\n'), ((500, 523), 'random.choice', 'random.choice', (['alphabet'], {}), '(alphabet)\n', (513, 523), False, 'import random\n'), ((1719, 1747), 'time.sleep', 'time.sleep', (['(1 / POLLING_RATE)'], {}), '(1 / POLLING_RATE)\n', (1729, 1747), False, 'import time\n')]
|
#!/usr/bin/env python
import sys
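# Extract and print the body of the first /* ... */ block comment in jniproxy.c.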
print_lines = False
with open("jniproxy.c", "r") as f:
for line in f:
if line.strip().endswith("*/"):
            sys.exit(0)
if print_lines:
print(line[4:-1])
elif line.strip().startswith("/*"):
print_lines = True
|
[
"sys.exit"
] |
[((161, 172), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (169, 172), False, 'import sys\n')]
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 17:40, 06/11/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from mealpy.bio_based import SMA
import numpy as np
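# Multi-objective test problem: each solution is scored on the Booth, Bukin and Matyas functions.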
def obj_function(solution):
def booth(x, y):
return (x + 2*y - 7)**2 + (2*x + y - 5)**2
def bukin(x, y):
return 100 * np.sqrt(np.abs(y - 0.01 * x**2)) + 0.01 * np.abs(x + 10)
def matyas(x, y):
return 0.26 * (x**2 + y**2) - 0.48 * x * y
return [booth(solution[0], solution[1]), bukin(solution[0], solution[1]), matyas(solution[0], solution[1])]
problem_dict1 = {
"obj_func": obj_function,
"lb": [-10, -10],
"ub": [10, 10],
"minmax": "min",
"verbose": True,
"obj_weight": [0.4, 0.1, 0.5] # Define it or default value will be [1, 1, 1]
}
## Run the algorithm
model1 = SMA.BaseSMA(problem_dict1, epoch=100, pop_size=50, pr=0.03)
model1.solve()
## You can access them all via object "history" like this:
model1.history.save_global_objectives_chart(filename="hello/goc")
model1.history.save_local_objectives_chart(filename="hello/loc")
model1.history.save_global_best_fitness_chart(filename="hello/gbfc")
model1.history.save_local_best_fitness_chart(filename="hello/lbfc")
model1.history.save_runtime_chart(filename="hello/rtc")
model1.history.save_exploration_exploitation_chart(filename="hello/eec")
model1.history.save_diversity_chart(filename="hello/dc")
model1.history.save_trajectory_chart(list_agent_idx=[3, 5], list_dimensions=[2], filename="hello/tc")
|
[
"numpy.abs",
"mealpy.bio_based.SMA.BaseSMA"
] |
[((1451, 1510), 'mealpy.bio_based.SMA.BaseSMA', 'SMA.BaseSMA', (['problem_dict1'], {'epoch': '(100)', 'pop_size': '(50)', 'pr': '(0.03)'}), '(problem_dict1, epoch=100, pop_size=50, pr=0.03)\n', (1462, 1510), False, 'from mealpy.bio_based import SMA\n'), ((987, 1001), 'numpy.abs', 'np.abs', (['(x + 10)'], {}), '(x + 10)\n', (993, 1001), True, 'import numpy as np\n'), ((953, 978), 'numpy.abs', 'np.abs', (['(y - 0.01 * x ** 2)'], {}), '(y - 0.01 * x ** 2)\n', (959, 978), True, 'import numpy as np\n')]
|
#!/usr/bin/python
from config import config
import commonFunctions
import datetime
import traceback
import httplib
import json
import sys
import logging
from time import sleep
logging.basicConfig(level=config.get_logging_level(),
format=config.runtime_variables['log_format'],
datefmt=config.runtime_variables['log_date_format'])
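# Query the OpenWeatherMap API for the configured city and return the parsed JSON response.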
def get_current_city_data():
http_connection = httplib.HTTPSConnection(config.open_map['host'])
http_connection.request("GET", ("%s?q=%s&units=%s&appid=%s" % (config.open_map['path'], config.open_map['city'], 'metric', config.open_map['api_key'])))
response = http_connection.getresponse()
#if (response.status != httplib.OK):
# print 'Error ocurred'
# print response.status, response.reason
# return None #Replace this with an exception
#else:
jsondata = response.read()
data = json.loads(jsondata)
return data
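# Flatten the OpenWeatherMap JSON and persist each measurement (temperature, pressure, humidity, wind, ...) via commonFunctions.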
def save_openweather_map_info_to_DB(json_data, creation_time):
current_place = commonFunctions.get_from_dic(json_data, 'name')
place = "city_%s" % current_place
measurement_date = commonFunctions.get_from_dic(json_data, 'dt')
current_temperature = commonFunctions.get_from_dic(json_data, 'main', 'temp')
current_pressure = commonFunctions.get_from_dic(json_data, 'main', 'pressure')
current_humidity = commonFunctions.get_from_dic(json_data, 'main', 'humidity')
current_temperature_min = commonFunctions.get_from_dic(json_data, 'main', 'temp_min')
current_temperature_max = commonFunctions.get_from_dic(json_data, 'main', 'temp_max')
current_rain = commonFunctions.get_from_dic(json_data, 'rain', '3h')
current_visibility = commonFunctions.get_from_dic(json_data, 'visibility')
current_wind_speed = commonFunctions.get_from_dic(json_data, 'wind', 'speed')
current_wind_direction = commonFunctions.get_from_dic(json_data, 'wind', 'deg')
current_clouds = commonFunctions.get_from_dic(json_data, 'clouds', 'all')
current_sunrise = commonFunctions.get_from_dic(json_data, 'sys', 'sunrise')
current_sunset = commonFunctions.get_from_dic(json_data, 'sys', 'sunset')
commonFunctions.save_temperature_data(place, current_temperature, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_pressure_data(place, current_pressure, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_humidity_data(place, current_humidity, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_temperature_range_min_data(place, current_temperature_min, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_temperature_range_max_data(place, current_temperature_max, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_rain_data(place, current_rain, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_visibility_data(place, current_visibility, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_wind_data(place, current_wind_speed, current_wind_direction, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_clouds_data(place, current_clouds, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_sunrise_data(place, current_sunrise, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_sunset_data(place, current_sunset, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
def main():
logging.info("Saving to file: %s" % (config.file['save_to_file']))
logging.info("Saving to DB: %s" % (config.mysql['save_to_DB']))
logging.info("Starting loop")
try:
while True:
now = datetime.datetime.utcnow()
openweathermap_jsondata = get_current_city_data()
save_openweather_map_info_to_DB(openweathermap_jsondata, now.isoformat())
sleep(config.open_map['sleep_time_in_seconds_between_reads'])
except KeyboardInterrupt:
logging.error("\nbye!")
sys.exit(1)
except Exception as e:
logging.error("\nOther error occurred")
        logging.error(e)
logging.error(traceback.format_exc())
sys.exit(1)
finally:
logging.info("\nbye2!")
#print("\nCleaning GPIO port\n")
#GPIO.cleanup()
# call main
if __name__ == '__main__':
main()
|
[
"commonFunctions.save_sunrise_data",
"commonFunctions.save_temperature_data",
"commonFunctions.save_wind_data",
"commonFunctions.get_from_dic",
"commonFunctions.save_temperature_range_max_data",
"datetime.datetime.utcnow",
"commonFunctions.save_clouds_data",
"logging.error",
"json.loads",
"commonFunctions.save_humidity_data",
"traceback.format_exc",
"commonFunctions.save_rain_data",
"httplib.HTTPSConnection",
"commonFunctions.save_sunset_data",
"commonFunctions.save_visibility_data",
"time.sleep",
"commonFunctions.save_temperature_range_min_data",
"sys.exit",
"logging.info",
"config.config.get_logging_level",
"commonFunctions.save_pressure_data"
] |
[((425, 473), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (["config.open_map['host']"], {}), "(config.open_map['host'])\n", (448, 473), False, 'import httplib\n'), ((902, 922), 'json.loads', 'json.loads', (['jsondata'], {}), '(jsondata)\n', (912, 922), False, 'import json\n'), ((1023, 1070), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""name"""'], {}), "(json_data, 'name')\n", (1051, 1070), False, 'import commonFunctions\n'), ((1132, 1177), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""dt"""'], {}), "(json_data, 'dt')\n", (1160, 1177), False, 'import commonFunctions\n'), ((1205, 1260), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""main"""', '"""temp"""'], {}), "(json_data, 'main', 'temp')\n", (1233, 1260), False, 'import commonFunctions\n'), ((1284, 1343), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""main"""', '"""pressure"""'], {}), "(json_data, 'main', 'pressure')\n", (1312, 1343), False, 'import commonFunctions\n'), ((1367, 1426), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""main"""', '"""humidity"""'], {}), "(json_data, 'main', 'humidity')\n", (1395, 1426), False, 'import commonFunctions\n'), ((1457, 1516), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""main"""', '"""temp_min"""'], {}), "(json_data, 'main', 'temp_min')\n", (1485, 1516), False, 'import commonFunctions\n'), ((1547, 1606), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""main"""', '"""temp_max"""'], {}), "(json_data, 'main', 'temp_max')\n", (1575, 1606), False, 'import commonFunctions\n'), ((1626, 1679), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""rain"""', '"""3h"""'], {}), "(json_data, 'rain', '3h')\n", (1654, 1679), False, 'import commonFunctions\n'), ((1705, 1758), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""visibility"""'], {}), "(json_data, 'visibility')\n", (1733, 1758), False, 'import commonFunctions\n'), ((1784, 1840), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""wind"""', '"""speed"""'], {}), "(json_data, 'wind', 'speed')\n", (1812, 1840), False, 'import commonFunctions\n'), ((1870, 1924), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""wind"""', '"""deg"""'], {}), "(json_data, 'wind', 'deg')\n", (1898, 1924), False, 'import commonFunctions\n'), ((1946, 2002), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""clouds"""', '"""all"""'], {}), "(json_data, 'clouds', 'all')\n", (1974, 2002), False, 'import commonFunctions\n'), ((2025, 2082), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""sys"""', '"""sunrise"""'], {}), "(json_data, 'sys', 'sunrise')\n", (2053, 2082), False, 'import commonFunctions\n'), ((2104, 2160), 'commonFunctions.get_from_dic', 'commonFunctions.get_from_dic', (['json_data', '"""sys"""', '"""sunset"""'], {}), "(json_data, 'sys', 'sunset')\n", (2132, 2160), False, 'import commonFunctions\n'), ((2168, 2293), 'commonFunctions.save_temperature_data', 'commonFunctions.save_temperature_data', (['place', 'current_temperature', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_temperature, \n 'FROM_UNIXTIME(%s)' % measurement_date, creation_time)\n", (2205, 2293), False, 'import 
commonFunctions\n'), ((2295, 2414), 'commonFunctions.save_pressure_data', 'commonFunctions.save_pressure_data', (['place', 'current_pressure', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_pressure, \n 'FROM_UNIXTIME(%s)' % measurement_date, creation_time)\n", (2329, 2414), False, 'import commonFunctions\n'), ((2416, 2535), 'commonFunctions.save_humidity_data', 'commonFunctions.save_humidity_data', (['place', 'current_humidity', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_humidity, \n 'FROM_UNIXTIME(%s)' % measurement_date, creation_time)\n", (2450, 2535), False, 'import commonFunctions\n'), ((2537, 2679), 'commonFunctions.save_temperature_range_min_data', 'commonFunctions.save_temperature_range_min_data', (['place', 'current_temperature_min', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place,\n current_temperature_min, 'FROM_UNIXTIME(%s)' % measurement_date,\n creation_time)\n", (2584, 2679), False, 'import commonFunctions\n'), ((2678, 2820), 'commonFunctions.save_temperature_range_max_data', 'commonFunctions.save_temperature_range_max_data', (['place', 'current_temperature_max', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place,\n current_temperature_max, 'FROM_UNIXTIME(%s)' % measurement_date,\n creation_time)\n", (2725, 2820), False, 'import commonFunctions\n'), ((2819, 2929), 'commonFunctions.save_rain_data', 'commonFunctions.save_rain_data', (['place', 'current_rain', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_rain, 'FROM_UNIXTIME(%s)' %\n measurement_date, creation_time)\n", (2849, 2929), False, 'import commonFunctions\n'), ((2932, 3055), 'commonFunctions.save_visibility_data', 'commonFunctions.save_visibility_data', (['place', 'current_visibility', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_visibility, \n 'FROM_UNIXTIME(%s)' % measurement_date, creation_time)\n", (2968, 3055), False, 'import commonFunctions\n'), ((3057, 3201), 'commonFunctions.save_wind_data', 'commonFunctions.save_wind_data', (['place', 'current_wind_speed', 'current_wind_direction', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_wind_speed,\n current_wind_direction, 'FROM_UNIXTIME(%s)' % measurement_date,\n creation_time)\n", (3087, 3201), False, 'import commonFunctions\n'), ((3200, 3314), 'commonFunctions.save_clouds_data', 'commonFunctions.save_clouds_data', (['place', 'current_clouds', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_clouds, 'FROM_UNIXTIME(%s)' %\n measurement_date, creation_time)\n", (3232, 3314), False, 'import commonFunctions\n'), ((3317, 3434), 'commonFunctions.save_sunrise_data', 'commonFunctions.save_sunrise_data', (['place', 'current_sunrise', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_sunrise, \n 'FROM_UNIXTIME(%s)' % measurement_date, creation_time)\n", (3350, 3434), False, 'import commonFunctions\n'), ((3436, 3550), 'commonFunctions.save_sunset_data', 'commonFunctions.save_sunset_data', (['place', 'current_sunset', "('FROM_UNIXTIME(%s)' % measurement_date)", 'creation_time'], {}), "(place, current_sunset, 'FROM_UNIXTIME(%s)' %\n measurement_date, creation_time)\n", (3468, 3550), False, 'import commonFunctions\n'), ((3566, 3630), 'logging.info', 'logging.info', (["('Saving to file: %s' % config.file['save_to_file'])"], {}), "('Saving to file: %s' % 
config.file['save_to_file'])\n", (3578, 3630), False, 'import logging\n'), ((3637, 3698), 'logging.info', 'logging.info', (["('Saving to DB: %s' % config.mysql['save_to_DB'])"], {}), "('Saving to DB: %s' % config.mysql['save_to_DB'])\n", (3649, 3698), False, 'import logging\n'), ((3705, 3734), 'logging.info', 'logging.info', (['"""Starting loop"""'], {}), "('Starting loop')\n", (3717, 3734), False, 'import logging\n'), ((205, 231), 'config.config.get_logging_level', 'config.get_logging_level', ([], {}), '()\n', (229, 231), False, 'from config import config\n'), ((4307, 4330), 'logging.info', 'logging.info', (['"""\nbye2!"""'], {}), "('\\nbye2!')\n", (4319, 4330), False, 'import logging\n'), ((3782, 3808), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3806, 3808), False, 'import datetime\n'), ((3975, 4036), 'time.sleep', 'sleep', (["config.open_map['sleep_time_in_seconds_between_reads']"], {}), "(config.open_map['sleep_time_in_seconds_between_reads'])\n", (3980, 4036), False, 'from time import sleep\n'), ((4075, 4098), 'logging.error', 'logging.error', (['"""\nbye!"""'], {}), "('\\nbye!')\n", (4088, 4098), False, 'import logging\n'), ((4107, 4118), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4115, 4118), False, 'import sys\n'), ((4154, 4196), 'logging.error', 'logging.error', (['"""\nOther error occurred"""'], {}), '("""\nOther error occurred""")\n', (4167, 4196), False, 'import logging\n'), ((4202, 4218), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (4215, 4218), False, 'import logging\n'), ((4274, 4285), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4282, 4285), False, 'import sys\n'), ((4242, 4264), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4262, 4264), False, 'import traceback\n')]
|
from keras.utils import multi_gpu_model
import numpy as np
import tensorflow as tf
import pickle
from keras.models import Model, Input
from keras.optimizers import Adam, RMSprop
from keras.layers import Dense
from keras.layers import Conv2D, Conv2DTranspose
from keras.layers import Flatten, Add
from keras.layers import Concatenate, Activation
from keras.layers import LeakyReLU, BatchNormalization, Lambda
from keras import backend as K
import os
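# GAN for CS-MRI reconstruction: a convolutional critic, an RRDB-based U-Net generator, and a combined model trained with Wasserstein, MAE and SSIM losses.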
def accw(y_true, y_pred):
y_pred=K.clip(y_pred, -1, 1)
return K.mean(K.equal(y_true, K.round(y_pred)))
def mssim(y_true, y_pred):
costs = 1.0 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 2.0))
return costs
def wloss(y_true,y_predict):
return -K.mean(y_true*y_predict)
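# Critic / discriminator: strided-conv downsampling stack with a residual head that outputs a 2-D patch map of scores.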
def discriminator(inp_shape = (256,256,1), trainable = True):
gamma_init = tf.random_normal_initializer(1., 0.02)
inp = Input(shape = (256,256,1))
l0 = Conv2D(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp) #b_init is set to none, maybe they are not using bias here, but I am.
l0 = LeakyReLU(alpha=0.2)(l0)
l1 = Conv2D(64*2, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l0)
l1 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l1)
l1 = LeakyReLU(alpha=0.2)(l1)
l2 = Conv2D(64*4, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l1)
l2 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l2)
l2 = LeakyReLU(alpha=0.2)(l2)
l3 = Conv2D(64*8, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l2)
l3 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l3)
l3 = LeakyReLU(alpha=0.2)(l3)
l4 = Conv2D(64*16, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l3)
l4 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l4)
l4 = LeakyReLU(alpha=0.2)(l4)
l5 = Conv2D(64*32, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l4)
l5 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l5)
l5 = LeakyReLU(alpha=0.2)(l5)
l6 = Conv2D(64*16, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l5)
l6 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l6)
l6 = LeakyReLU(alpha=0.2)(l6)
l7 = Conv2D(64*8, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l6)
l7 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l7)
l7 = LeakyReLU(alpha=0.2)(l7)
#x
l8 = Conv2D(64*2, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l7)
l8 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l8)
l8 = LeakyReLU(alpha=0.2)(l8)
l9 = Conv2D(64*2, (3,3), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l8)
l9 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l9)
l9 = LeakyReLU(alpha=0.2)(l9)
l10 = Conv2D(64*8, (3,3), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l9)
l10 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l10)
l10 = LeakyReLU(alpha=0.2)(l10)
#y
l11 = Add()([l7,l10])
l11 = LeakyReLU(alpha = 0.2)(l11)
out=Conv2D(filters=1,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l11)
model = Model(inputs = inp, outputs = out)
return model
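# Residual dense block: four densely connected 3x3 convolutions plus a fusion convolution, scaled by beta and added back to the input.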
def resden(x,fil,gr,beta,gamma_init,trainable):
x1=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)
x1=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x1)
x1=LeakyReLU(alpha=0.2)(x1)
x1=Concatenate(axis=-1)([x,x1])
x2=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x1)
x2=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x2)
x2=LeakyReLU(alpha=0.2)(x2)
x2=Concatenate(axis=-1)([x1,x2])
x3=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x2)
x3=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x3)
x3=LeakyReLU(alpha=0.2)(x3)
x3=Concatenate(axis=-1)([x2,x3])
x4=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x3)
x4=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x4)
x4=LeakyReLU(alpha=0.2)(x4)
x4=Concatenate(axis=-1)([x3,x4])
x5=Conv2D(filters=fil,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x4)
x5=Lambda(lambda x:x*beta)(x5)
xout=Add()([x5,x])
return xout
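# Residual-in-residual dense block: three residual dense blocks, scaled by betar and added back to the block input.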
def resresden(x,fil,gr,betad,betar,gamma_init,trainable):
x1=resden(x,fil,gr,betad,gamma_init,trainable)
x2=resden(x1,fil,gr,betad,gamma_init,trainable)
x3=resden(x2,fil,gr,betad,gamma_init,trainable)
x3=Lambda(lambda x:x*betar)(x3)
xout=Add()([x3,x])
return xout
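# Generator: U-Net style encoder/decoder with nb residual-in-residual dense blocks at the 8x8 bottleneck and skip connections; maps the 2-channel (real/imag) zero-filled input to a single-channel tanh output.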
def generator(inp_shape, trainable = True):
gamma_init = tf.random_normal_initializer(1., 0.02)
fd=512
gr=32
nb=12
betad=0.2
betar=0.2
inp_real_imag = Input(inp_shape)
lay_128dn = Conv2D(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp_real_imag)
lay_128dn = LeakyReLU(alpha = 0.2)(lay_128dn)
lay_64dn = Conv2D(128, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_128dn)
lay_64dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_64dn)
lay_64dn = LeakyReLU(alpha = 0.2)(lay_64dn)
lay_32dn = Conv2D(256, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_64dn)
lay_32dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_32dn)
lay_32dn = LeakyReLU(alpha=0.2)(lay_32dn)
lay_16dn = Conv2D(512, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_32dn)
lay_16dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_16dn)
lay_16dn = LeakyReLU(alpha=0.2)(lay_16dn) #16x16
lay_8dn = Conv2D(512, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_16dn)
lay_8dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_8dn)
lay_8dn = LeakyReLU(alpha=0.2)(lay_8dn) #8x8
xc1=Conv2D(filters=fd,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_8dn) #8x8
xrrd=xc1
for m in range(nb):
xrrd=resresden(xrrd,fd,gr,betad,betar,gamma_init,trainable)
xc2=Conv2D(filters=fd,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(xrrd)
lay_8upc=Add()([xc1,xc2])
lay_16up = Conv2DTranspose(1024, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_8upc)
lay_16up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_16up)
lay_16up = Activation('relu')(lay_16up) #16x16
lay_16upc = Concatenate(axis = -1)([lay_16up,lay_16dn])
lay_32up = Conv2DTranspose(256, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_16upc)
lay_32up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_32up)
lay_32up = Activation('relu')(lay_32up) #32x32
lay_32upc = Concatenate(axis = -1)([lay_32up,lay_32dn])
lay_64up = Conv2DTranspose(128, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_32upc)
lay_64up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_64up)
lay_64up = Activation('relu')(lay_64up) #64x64
lay_64upc = Concatenate(axis = -1)([lay_64up,lay_64dn])
lay_128up = Conv2DTranspose(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_64upc)
lay_128up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_128up)
lay_128up = Activation('relu')(lay_128up) #128x128
lay_128upc = Concatenate(axis = -1)([lay_128up,lay_128dn])
lay_256up = Conv2DTranspose(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_128upc)
lay_256up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_256up)
lay_256up = Activation('relu')(lay_256up) #256x256
out = Conv2D(1, (1,1), strides = (1,1), activation = 'tanh', padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_256up)
model = Model(inputs = inp_real_imag, outputs = out)
return model
def define_gan_model(gen_model, dis_model, inp_shape):
dis_model.trainable = False
inp = Input(shape = inp_shape)
out_g = gen_model(inp)
out_dis = dis_model(out_g)
out_g1 = out_g
model = Model(inputs = inp, outputs = [out_dis, out_g, out_g1])
model.summary()
return model
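# WGAN-style training loop: n_critic critic updates with weight clipping per generator update; losses are logged to f and generator weights are saved after every epoch.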
def train(g_par, d_par, gan_model, dataset_real, u_sampled_data, n_epochs, n_batch, n_critic, clip_val, n_patch, f):
bat_per_epo = int(dataset_real.shape[0]/n_batch)
half_batch = int(n_batch/2)
for i in range(n_epochs):
for j in range(bat_per_epo):
# training the discriminator
for k in range(n_critic):
ix = np.random.randint(0, dataset_real.shape[0], half_batch)
X_real = dataset_real[ix]
y_real = np.ones((half_batch,n_patch,n_patch,1))
ix_1 = np.random.randint(0, u_sampled_data.shape[0], half_batch)
X_fake = g_par.predict(u_sampled_data[ix_1])
y_fake = -np.ones((half_batch,n_patch,n_patch,1))
X, y = np.vstack((X_real, X_fake)), np.vstack((y_real,y_fake))
d_loss, accuracy = d_par.train_on_batch(X,y)
for l in d_par.layers:
weights=l.get_weights()
weights=[np.clip(w, -clip_val,clip_val) for w in weights]
l.set_weights(weights)
# training the generator
ix = np.random.randint(0, dataset_real.shape[0], n_batch)
X_r = dataset_real[ix]
X_gen_inp = u_sampled_data[ix]
y_gan = np.ones((n_batch,n_patch,n_patch,1))
g_loss = gan_model.train_on_batch ([X_gen_inp], [y_gan, X_r, X_r])
f.write('>%d, %d/%d, d=%.3f, acc = %.3f, w=%.3f, mae=%.3f, mssim=%.3f, g=%.3f' %(i+1, j+1, bat_per_epo, d_loss, accuracy, g_loss[1], g_loss[2], g_loss[3], g_loss[0]))
f.write('\n')
print ('>%d, %d/%d, d=%.3f, acc = %.3f, g=%.3f' %(i+1, j+1, bat_per_epo, d_loss, accuracy, g_loss[0]))
filename = '/home/cs-mri-gan/gen_weights_a5_%04d.h5' % (i+1)
g_save = g_par.get_layer('model_3')
g_save.save_weights(filename)
f.close()
#hyperparameters
n_epochs = 300
n_batch = 32
n_critic = 3
clip_val = 0.05
in_shape_gen = (256,256,2)
in_shape_dis = (256,256,1)
accel = 3
d_model = discriminator (inp_shape = in_shape_dis, trainable = True)
d_model.summary()
d_par = multi_gpu_model(d_model, gpus=4, cpu_relocation = True) #for multi-gpu training
opt = Adam(lr = 0.0002, beta_1 = 0.5)
d_par.compile(loss = wloss, optimizer = opt, metrics = [accw])
g_model = generator(inp_shape = in_shape_gen , trainable = True)
g_par = multi_gpu_model(g_model, gpus=4, cpu_relocation = True) #for multi-gpu training
g_par.summary()
gan_model = define_gan_model(g_par, d_par, in_shape_gen)
opt1 = Adam(lr = 0.0001, beta_1 = 0.5)
gan_model.compile(loss = [wloss, 'mae', mssim], optimizer = opt1, loss_weights = [0.01, 20.0, 1.0]) #loss weights for generator training
n_patch=d_model.output_shape[1]
data_path='/home/cs-mri-gan/training_gt_aug.pickle' #Ground truth
usam_path='/home/cs-mri-gan/training_usamp_1dg_a5_aug.pickle' #Zero-filled reconstructions
df = open(data_path,'rb')
uf = open(usam_path,'rb')
dataset_real = pickle.load(df)
u_sampled_data = pickle.load(uf)
dataset_real = np.expand_dims(dataset_real, axis = -1)
u_sampled_data = np.expand_dims(u_sampled_data, axis = -1)
u_sampled_data_real = u_sampled_data.real
u_sampled_data_imag = u_sampled_data.imag
u_sampled_data_2c = np.concatenate((u_sampled_data_real, u_sampled_data_imag), axis = -1)
f = open('/home/cs-mri-gan/log_a5.txt', 'x')
f = open('/home/cs-mri-gan/log_a5.txt', 'a')
train(g_par, d_par, gan_model, dataset_real, u_sampled_data_2c, n_epochs, n_batch, n_critic, clip_val, n_patch, f)
|
[
"tensorflow.image.ssim",
"numpy.ones",
"keras.models.Model",
"numpy.clip",
"pickle.load",
"numpy.random.randint",
"keras.layers.LeakyReLU",
"keras.utils.multi_gpu_model",
"keras.optimizers.Adam",
"keras.layers.Conv2DTranspose",
"keras.layers.Concatenate",
"tensorflow.random_normal_initializer",
"keras.layers.Conv2D",
"keras.backend.clip",
"keras.backend.round",
"keras.layers.BatchNormalization",
"numpy.concatenate",
"numpy.vstack",
"keras.layers.Activation",
"numpy.expand_dims",
"keras.models.Input",
"keras.layers.Add",
"keras.backend.mean",
"keras.layers.Lambda"
] |
[((12779, 12832), 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['d_model'], {'gpus': '(4)', 'cpu_relocation': '(True)'}), '(d_model, gpus=4, cpu_relocation=True)\n', (12794, 12832), False, 'from keras.utils import multi_gpu_model\n'), ((12866, 12893), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (12870, 12893), False, 'from keras.optimizers import Adam, RMSprop\n'), ((13036, 13089), 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['g_model'], {'gpus': '(4)', 'cpu_relocation': '(True)'}), '(g_model, gpus=4, cpu_relocation=True)\n', (13051, 13089), False, 'from keras.utils import multi_gpu_model\n'), ((13198, 13225), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.5)'}), '(lr=0.0001, beta_1=0.5)\n', (13202, 13225), False, 'from keras.optimizers import Adam, RMSprop\n'), ((13626, 13641), 'pickle.load', 'pickle.load', (['df'], {}), '(df)\n', (13637, 13641), False, 'import pickle\n'), ((13659, 13674), 'pickle.load', 'pickle.load', (['uf'], {}), '(uf)\n', (13670, 13674), False, 'import pickle\n'), ((13691, 13728), 'numpy.expand_dims', 'np.expand_dims', (['dataset_real'], {'axis': '(-1)'}), '(dataset_real, axis=-1)\n', (13705, 13728), True, 'import numpy as np\n'), ((13748, 13787), 'numpy.expand_dims', 'np.expand_dims', (['u_sampled_data'], {'axis': '(-1)'}), '(u_sampled_data, axis=-1)\n', (13762, 13787), True, 'import numpy as np\n'), ((13897, 13964), 'numpy.concatenate', 'np.concatenate', (['(u_sampled_data_real, u_sampled_data_imag)'], {'axis': '(-1)'}), '((u_sampled_data_real, u_sampled_data_imag), axis=-1)\n', (13911, 13964), True, 'import numpy as np\n'), ((487, 508), 'keras.backend.clip', 'K.clip', (['y_pred', '(-1)', '(1)'], {}), '(y_pred, -1, 1)\n', (493, 508), True, 'from keras import backend as K\n'), ((822, 861), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(1.0)', '(0.02)'], {}), '(1.0, 0.02)\n', (850, 861), True, 'import tensorflow as tf\n'), ((876, 902), 'keras.models.Input', 'Input', ([], {'shape': '(256, 256, 1)'}), '(shape=(256, 256, 1))\n', (881, 902), False, 'from keras.models import Model, Input\n'), ((4140, 4170), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'out'}), '(inputs=inp, outputs=out)\n', (4145, 4170), False, 'from keras.models import Model, Input\n'), ((6067, 6106), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(1.0)', '(0.02)'], {}), '(1.0, 0.02)\n', (6095, 6106), True, 'import tensorflow as tf\n'), ((6184, 6200), 'keras.models.Input', 'Input', (['inp_shape'], {}), '(inp_shape)\n', (6189, 6200), False, 'from keras.models import Model, Input\n'), ((10158, 10198), 'keras.models.Model', 'Model', ([], {'inputs': 'inp_real_imag', 'outputs': 'out'}), '(inputs=inp_real_imag, outputs=out)\n', (10163, 10198), False, 'from keras.models import Model, Input\n'), ((10327, 10349), 'keras.models.Input', 'Input', ([], {'shape': 'inp_shape'}), '(shape=inp_shape)\n', (10332, 10349), False, 'from keras.models import Model, Input\n'), ((10441, 10492), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': '[out_dis, out_g, out_g1]'}), '(inputs=inp, outputs=[out_dis, out_g, out_g1])\n', (10446, 10492), False, 'from keras.models import Model, Input\n'), ((711, 737), 'keras.backend.mean', 'K.mean', (['(y_true * y_predict)'], {}), '(y_true * y_predict)\n', (717, 737), True, 'from keras import backend as K\n'), ((917, 1044), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 
'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (923, 1044), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1133, 1153), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1142, 1153), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1172, 1303), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 2)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 2, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (1178, 1303), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1319, 1388), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (1337, 1388), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1406, 1426), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1415, 1426), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1445, 1576), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 4)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 4, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (1451, 1576), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1592, 1661), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (1610, 1661), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1679, 1699), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1688, 1699), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1718, 1849), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 8)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 8, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (1724, 1849), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1865, 1934), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (1883, 1934), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1952, 1972), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1961, 1972), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1991, 2123), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 16)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 16, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', 
bias_initializer='zeros')\n", (1997, 2123), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2139, 2208), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2157, 2208), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2226, 2246), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2235, 2246), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2265, 2397), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 32)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 32, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (2271, 2397), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2413, 2482), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2431, 2482), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2500, 2520), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2509, 2520), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2539, 2671), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 16)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 16, (1, 1), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (2545, 2671), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2687, 2756), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2705, 2756), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2774, 2794), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2783, 2794), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2813, 2944), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 8)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 8, (1, 1), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (2819, 2944), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2960, 3029), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2978, 3029), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3047, 3067), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3056, 3067), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3093, 3224), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 2)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 2, (1, 1), strides=(1, 1), padding='same', 
use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3099, 3224), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((3240, 3309), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (3258, 3309), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3327, 3347), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3336, 3347), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3366, 3497), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 2)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 2, (3, 3), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3372, 3497), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((3513, 3582), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (3531, 3582), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3600, 3620), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3609, 3620), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3640, 3771), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 8)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 8, (3, 3), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3646, 3771), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((3788, 3857), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (3806, 3857), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3877, 3897), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3886, 3897), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3920, 3925), 'keras.layers.Add', 'Add', ([], {}), '()\n', (3923, 3925), False, 'from keras.layers import Flatten, Add\n'), ((3946, 3966), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3955, 3966), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3987, 4123), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1)', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=1, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3993, 4123), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4252, 4389), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', 
bias_initializer='zeros')\n", (4258, 4389), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4399, 4468), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (4417, 4468), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4484, 4504), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4493, 4504), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4521, 4541), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4532, 4541), False, 'from keras.layers import Concatenate, Activation\n'), ((4562, 4699), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (4568, 4699), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4710, 4779), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (4728, 4779), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4795, 4815), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4804, 4815), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4828, 4848), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4839, 4848), False, 'from keras.layers import Concatenate, Activation\n'), ((4874, 5011), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (4880, 5011), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((5022, 5091), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (5040, 5091), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5107, 5127), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5116, 5127), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5140, 5160), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5151, 5160), False, 'from keras.layers import Concatenate, Activation\n'), ((5182, 5319), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (5188, 5319), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((5330, 5399), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), 
'(gamma_initializer=gamma_init, trainable=trainable)\n', (5348, 5399), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5415, 5435), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5424, 5435), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5448, 5468), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5459, 5468), False, 'from keras.layers import Concatenate, Activation\n'), ((5490, 5628), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'fil', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=fil, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (5496, 5628), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((5639, 5665), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * beta)'], {}), '(lambda x: x * beta)\n', (5645, 5665), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5676, 5681), 'keras.layers.Add', 'Add', ([], {}), '()\n', (5679, 5681), False, 'from keras.layers import Flatten, Add\n'), ((5932, 5959), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * betar)'], {}), '(lambda x: x * betar)\n', (5938, 5959), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5970, 5975), 'keras.layers.Add', 'Add', ([], {}), '()\n', (5973, 5975), False, 'from keras.layers import Flatten, Add\n'), ((6216, 6343), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (6222, 6343), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((6382, 6402), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6391, 6402), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6435, 6563), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(128, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (6441, 6563), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((6593, 6662), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (6611, 6662), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6691, 6711), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6700, 6711), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6743, 6871), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(256, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (6749, 6871), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((6900, 6969), 
'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (6918, 6969), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6998, 7018), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7007, 7018), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7048, 7176), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(512, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7054, 7176), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((7205, 7274), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (7223, 7274), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7303, 7323), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7312, 7323), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7361, 7489), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(512, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7367, 7489), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((7517, 7586), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (7535, 7586), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7613, 7633), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7622, 7633), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7657, 7794), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'fd', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=fd, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7663, 7794), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((7924, 8061), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'fd', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=fd, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7930, 8061), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((8079, 8084), 'keras.layers.Add', 'Add', ([], {}), '()\n', (8082, 8084), False, 'from keras.layers import Flatten, Add\n'), ((8111, 8249), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(1024)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(1024, (4, 4), strides=(2, 2), padding='same', 
use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (8126, 8249), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((8278, 8347), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (8296, 8347), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((8376, 8394), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8386, 8394), False, 'from keras.layers import Concatenate, Activation\n'), ((8432, 8452), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8443, 8452), False, 'from keras.layers import Concatenate, Activation\n'), ((8495, 8632), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(256, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (8510, 8632), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((8663, 8732), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (8681, 8732), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((8761, 8779), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8771, 8779), False, 'from keras.layers import Concatenate, Activation\n'), ((8817, 8837), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8828, 8837), False, 'from keras.layers import Concatenate, Activation\n'), ((8881, 9018), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(128, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (8896, 9018), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((9048, 9117), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (9066, 9117), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((9146, 9164), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9156, 9164), False, 'from keras.layers import Concatenate, Activation\n'), ((9202, 9222), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (9213, 9222), False, 'from keras.layers import Concatenate, Activation\n'), ((9267, 9403), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (9282, 9403), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((9434, 9503), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', 
(9452, 9503), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((9534, 9552), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9544, 9552), False, 'from keras.layers import Concatenate, Activation\n'), ((9594, 9614), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (9605, 9614), False, 'from keras.layers import Concatenate, Activation\n'), ((9661, 9797), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (9676, 9797), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((9829, 9898), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (9847, 9898), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((9929, 9947), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9939, 9947), False, 'from keras.layers import Concatenate, Activation\n'), ((9983, 10128), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'strides': '(1, 1)', 'activation': '"""tanh"""', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(1, (1, 1), strides=(1, 1), activation='tanh', padding='same',\n use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros')\n", (9989, 10128), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((541, 556), 'keras.backend.round', 'K.round', (['y_pred'], {}), '(y_pred)\n', (548, 556), True, 'from keras import backend as K\n'), ((618, 652), 'tensorflow.image.ssim', 'tf.image.ssim', (['y_true', 'y_pred', '(2.0)'], {}), '(y_true, y_pred, 2.0)\n', (631, 652), True, 'import tensorflow as tf\n'), ((11768, 11820), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dataset_real.shape[0]', 'n_batch'], {}), '(0, dataset_real.shape[0], n_batch)\n', (11785, 11820), True, 'import numpy as np\n'), ((11919, 11958), 'numpy.ones', 'np.ones', (['(n_batch, n_patch, n_patch, 1)'], {}), '((n_batch, n_patch, n_patch, 1))\n', (11926, 11958), True, 'import numpy as np\n'), ((10932, 10987), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dataset_real.shape[0]', 'half_batch'], {}), '(0, dataset_real.shape[0], half_batch)\n', (10949, 10987), True, 'import numpy as np\n'), ((11068, 11110), 'numpy.ones', 'np.ones', (['(half_batch, n_patch, n_patch, 1)'], {}), '((half_batch, n_patch, n_patch, 1))\n', (11075, 11110), True, 'import numpy as np\n'), ((11145, 11202), 'numpy.random.randint', 'np.random.randint', (['(0)', 'u_sampled_data.shape[0]', 'half_batch'], {}), '(0, u_sampled_data.shape[0], half_batch)\n', (11162, 11202), True, 'import numpy as np\n'), ((11291, 11333), 'numpy.ones', 'np.ones', (['(half_batch, n_patch, n_patch, 1)'], {}), '((half_batch, n_patch, n_patch, 1))\n', (11298, 11333), True, 'import numpy as np\n'), ((11367, 11394), 'numpy.vstack', 'np.vstack', (['(X_real, X_fake)'], {}), '((X_real, X_fake))\n', (11376, 11394), True, 'import numpy as np\n'), ((11396, 11423), 'numpy.vstack', 'np.vstack', (['(y_real, y_fake)'], {}), '((y_real, y_fake))\n', (11405, 11423), True, 'import numpy as np\n'), ((11609, 11640), 'numpy.clip', 
'np.clip', (['w', '(-clip_val)', 'clip_val'], {}), '(w, -clip_val, clip_val)\n', (11616, 11640), True, 'import numpy as np\n')]
|
import argparse
def print_result(args):
# Print results
with open(args.accuracy, 'r') as f:
score = f.read()
print(f"Random forest (accuracy): {score}")
if __name__ == '__main__':
# Defining and parsing the command-line arguments
parser = argparse.ArgumentParser(description='My program description')
parser.add_argument('--data', type=str)
parser.add_argument('--accuracy', type=str)
args = parser.parse_args()
print_result(args)
|
[
"argparse.ArgumentParser"
] |
[((276, 337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""My program description"""'}), "(description='My program description')\n", (299, 337), False, 'import argparse\n')]
|
from typing import Dict, List
import torch
from functools import partial
from backprop.models import PathModel
from torch.optim.adamw import AdamW
from sentence_transformers import SentenceTransformer
class STModel(PathModel):
"""
Class for models which are initialised from Sentence Transformers
Attributes:
model_path: path to ST model
name: string identifier for the model. Lowercase letters and numbers.
No spaces/special characters except dashes.
max_length: Max supported token length for vectorisation
description: String description of the model.
tasks: List of supported task strings
details: Dictionary of additional details about the model
init_model: Class used to initialise model
device: Device for model. Defaults to "cuda" if available.
"""
def __init__(self, model_path, init_model=SentenceTransformer, name: str = None,
description: str = None, tasks: List[str] = None, details: Dict = None,
max_length=512, device=None):
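        # Pre-bind the target device so PathModel constructs the SentenceTransformer on it.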
init_model = partial(init_model, device=device)
tasks = ["text-vectorisation"]
PathModel.__init__(self, model_path, name=name, description=description,
details=details, tasks=tasks,
init_model=init_model,
device=device)
self.max_length = max_length
@staticmethod
def list_models():
from .models_list import models
return models
@torch.no_grad()
def __call__(self, task_input, task="text-vectorisation", return_tensor=False):
"""
Uses the model for the text-vectorisation task
Args:
task_input: input dictionary according to the ``text-vectorisation`` task specification
task: text-vectorisation
"""
is_list = False
if task == "text-vectorisation":
input_ids = None
attention_mask = None
text = task_input.get("text")
if type(text) == list:
is_list = True
else:
text = [text]
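            # Tokenize the batch and move the tensors to the model's device before vectorising.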
features = self.model.tokenizer(text, truncation=True, padding=True, return_tensors="pt").to(self._model_device)
text_vecs = self.vectorise(features)
if not return_tensor:
text_vecs = text_vecs.tolist()
output = text_vecs
if not is_list:
output = output[0]
return output
else:
raise ValueError(f"Unsupported task '{task}'")
def training_step(self, params, task="text-vectorisation"):
text = params["text"]
return self.vectorise(text)
def process_batch(self, params, task="text-vectorisation"):
if task == "text-vectorisation":
max_length = params["max_length"] or self.max_length
if max_length > self.max_length:
raise ValueError(f"This model has a max_length limit of {self.max_length}")
text = params["text"]
return self.model.tokenizer(text, truncation=True, padding="max_length", return_tensors="pt")
def vectorise(self, features):
return self.model.forward(features)["sentence_embedding"]
def configure_optimizers(self):
return AdamW(params=self.model.parameters(), lr=2e-5, eps=1e-6, correct_bias=False)
|
[
"backprop.models.PathModel.__init__",
"functools.partial",
"torch.no_grad"
] |
[((1562, 1577), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1575, 1577), False, 'import torch\n'), ((1092, 1126), 'functools.partial', 'partial', (['init_model'], {'device': 'device'}), '(init_model, device=device)\n', (1099, 1126), False, 'from functools import partial\n'), ((1176, 1320), 'backprop.models.PathModel.__init__', 'PathModel.__init__', (['self', 'model_path'], {'name': 'name', 'description': 'description', 'details': 'details', 'tasks': 'tasks', 'init_model': 'init_model', 'device': 'device'}), '(self, model_path, name=name, description=description,\n details=details, tasks=tasks, init_model=init_model, device=device)\n', (1194, 1320), False, 'from backprop.models import PathModel\n')]
|
from c2nl.tokenizers.code_tokenizer import CodeTokenizer, Tokens, Tokenizer
import argparse
import re
from os import path
import javalang
from pathlib import Path
def get_project_root() -> Path:
"""Returns project root folder."""
return str(Path(__file__).parent.parent.parent)
def get_java_method_map(tree):
"""High level model that handles initializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
method_map = []
for method in tree.types[0].methods:
if len(method.annotations) > 0:
method_map.append([method.annotations[0].position.line, method.position.line, method.annotations[0].position.column])
else:
method_map.append([method.position.line, method.position.line, method.position.column])
return method_map
def get_method_location_map(java_file_path):
method_map = []
with open(java_file_path, 'r') as java_file:
java_file_text = java_file.read()
tree = javalang.parse.parse(java_file_text)
method_map = get_java_method_map(tree)
return method_map
def process_java_file(java_file_path):
method_map = get_method_location_map(java_file_path)
total_methods = len(method_map)
method_text = []
tokenizer = CodeTokenizer(True, True)
with open(java_file_path, 'r') as process_sample_file:
current_line_no = 1
method_no = 0
current_method = []
count_open_bracket = 0
count_close_bracket = 0
verify = False
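        # Walk the file line by line, counting braces to find where each method body ends.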
for x in process_sample_file:
if current_line_no >= method_map[method_no][0]:
current_method.append(x)
if current_line_no >= method_map[method_no][1]:
count_open_bracket = count_open_bracket + x.count('{')
count_close_bracket = count_close_bracket + x.count('}')
if count_open_bracket > 0:
verify = True
if count_open_bracket == count_close_bracket and verify:
temp_method_text = ' '.join([line.strip() for line in current_method])
temp_method_text = tokenize_java_method(tokenizer, temp_method_text)
method_text.append([method_map[method_no], temp_method_text])
current_method = []
method_no = method_no + 1
count_open_bracket = 0
count_close_bracket = 0
verify = False
if method_no == total_methods:
break
current_line_no = current_line_no + 1
return method_text
def tokenize_java_method(tokenizer, inline_method_text):
if tokenizer is None:
tokenizer = CodeTokenizer(True, True)
text = ''
for i in tokenizer.tokenize(inline_method_text).data:
        s = r'(@|\+|\-|,|\]|\[|{|}|=|!|\(|\)|>|<|;|"|/|\.)'
res = list(filter(None, re.split(s, str(i[0]))))
res = ' '.join(res)
text = text + ' ' + res
return text[1:]
def tokenize_java(java_file_path, save_data):
# check if the file exist
if path.exists(java_file_path):
print("Processing the java file : % s" % java_file_path)
else:
raise Exception('No such java file at location: %s' % java_file_path)
method_text = process_java_file(java_file_path)
if save_data:
with open(str(get_project_root()) + '/output.code', 'w+') as output_sample_file:
for line, method in method_text:
output_sample_file.write(method + '\n')
        print('Saving tokenized file into: %s' % (get_project_root() + '/output.code'))
return method_text
if __name__ == '__main__':
# Parse cmdline args and setup environment
parser = argparse.ArgumentParser(
'Java Code Tokenizer Generator',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
# Adding Java file path argument
parser.add_argument("-p", "--file_path", help="Input file path", required=True)
# Adding Java file path argument
parser.add_argument("-f", "--file_type", help="File type", required=True,
choices=['java', 'method'], )
# Read arguments from command line
args = parser.parse_args()
if args.file_type == 'java':
print("Tokenized : % s" % tokenize_java(args.file_path, True))
if args.file_type == 'method':
if path.exists(args.file_path):
print("Processing the file : % s" % args.file_path)
with open(args.file_path, 'r') as sample_file:
java_file_content = sample_file.read()
tokenize_method_text = tokenize_java_method(None, java_file_content)
with open('../../output.code', 'w+') as output_file:
output_file.write(tokenize_method_text)
print("Tokenized : % s" % tokenize_method_text)
else:
raise Exception('No such file at location: %s' % args.file_path)
|
[
"c2nl.tokenizers.code_tokenizer.CodeTokenizer",
"argparse.ArgumentParser",
"os.path.exists",
"javalang.parse.parse",
"pathlib.Path"
] |
[((1300, 1325), 'c2nl.tokenizers.code_tokenizer.CodeTokenizer', 'CodeTokenizer', (['(True)', '(True)'], {}), '(True, True)\n', (1313, 1325), False, 'from c2nl.tokenizers.code_tokenizer import CodeTokenizer, Tokens, Tokenizer\n'), ((3111, 3138), 'os.path.exists', 'path.exists', (['java_file_path'], {}), '(java_file_path)\n', (3122, 3138), False, 'from os import path\n'), ((3753, 3870), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Java Code Tokenizer Generator"""'], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "('Java Code Tokenizer Generator', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (3776, 3870), False, 'import argparse\n'), ((1021, 1057), 'javalang.parse.parse', 'javalang.parse.parse', (['java_file_text'], {}), '(java_file_text)\n', (1041, 1057), False, 'import javalang\n'), ((2731, 2756), 'c2nl.tokenizers.code_tokenizer.CodeTokenizer', 'CodeTokenizer', (['(True)', '(True)'], {}), '(True, True)\n', (2744, 2756), False, 'from c2nl.tokenizers.code_tokenizer import CodeTokenizer, Tokens, Tokenizer\n'), ((4402, 4429), 'os.path.exists', 'path.exists', (['args.file_path'], {}), '(args.file_path)\n', (4413, 4429), False, 'from os import path\n'), ((251, 265), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (255, 265), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
from fastapi.routing import APIRouter
from apps.places.views import places
router = APIRouter()
router.include_router(places.router, prefix='/places')
|
[
"fastapi.routing.APIRouter"
] |
[((111, 122), 'fastapi.routing.APIRouter', 'APIRouter', ([], {}), '()\n', (120, 122), False, 'from fastapi.routing import APIRouter\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the date formatter of the TransmartCopyWriter.
"""
from datetime import datetime, timezone, date
from dateutil.tz import gettz
from transmart_loader.copy_writer import format_date, microseconds
def test_date_serialization():
assert format_date(
date(2019, 6, 28)) == '2019-06-28'
assert format_date(
datetime(2019, 6, 28, 13, 2, 58,
tzinfo=timezone.utc)) == '2019-06-28 13:02:58'
assert format_date(
datetime(2019, 6, 28, 13, 2, 58, 12345,
tzinfo=timezone.utc)) == '2019-06-28 13:02:58.012345'
assert format_date(
datetime(2019, 6, 28, 13, 2, 58,
tzinfo=gettz('Europe/Amsterdam')
)) == '2019-06-28 11:02:58'
assert format_date(datetime.fromtimestamp(
microseconds(date(2019, 6, 28))/1000,
timezone.utc)) == '2019-06-28 00:00:00'
assert format_date(datetime.fromtimestamp(
microseconds(datetime(2019, 6, 28, 13, 2, 58))/1000,
timezone.utc)) == '2019-06-28 13:02:58'
|
[
"dateutil.tz.gettz",
"datetime.date",
"datetime.datetime"
] |
[((319, 336), 'datetime.date', 'date', (['(2019)', '(6)', '(28)'], {}), '(2019, 6, 28)\n', (323, 336), False, 'from datetime import datetime, timezone, date\n'), ((386, 439), 'datetime.datetime', 'datetime', (['(2019)', '(6)', '(28)', '(13)', '(2)', '(58)'], {'tzinfo': 'timezone.utc'}), '(2019, 6, 28, 13, 2, 58, tzinfo=timezone.utc)\n', (394, 439), False, 'from datetime import datetime, timezone, date\n'), ((515, 575), 'datetime.datetime', 'datetime', (['(2019)', '(6)', '(28)', '(13)', '(2)', '(58)', '(12345)'], {'tzinfo': 'timezone.utc'}), '(2019, 6, 28, 13, 2, 58, 12345, tzinfo=timezone.utc)\n', (523, 575), False, 'from datetime import datetime, timezone, date\n'), ((715, 740), 'dateutil.tz.gettz', 'gettz', (['"""Europe/Amsterdam"""'], {}), "('Europe/Amsterdam')\n", (720, 740), False, 'from dateutil.tz import gettz\n'), ((854, 871), 'datetime.date', 'date', (['(2019)', '(6)', '(28)'], {}), '(2019, 6, 28)\n', (858, 871), False, 'from datetime import datetime, timezone, date\n'), ((995, 1027), 'datetime.datetime', 'datetime', (['(2019)', '(6)', '(28)', '(13)', '(2)', '(58)'], {}), '(2019, 6, 28, 13, 2, 58)\n', (1003, 1027), False, 'from datetime import datetime, timezone, date\n')]
|
"""Identity matrix."""
from scipy import sparse
import numpy as np
def iden(dim: int, is_sparse: bool = False) -> np.ndarray:
r"""
Calculate the :code:`dim`-by-:code:`dim` identity matrix [WIKID]_.
Returns the :code:`dim`-by-:code:`dim` identity matrix. If :code:`is_sparse
= False` then the matrix will be full. If :code:`is_sparse = True` then the
matrix will be sparse.
.. math::
\mathbb{I} = \begin{pmatrix}
1 & 0 & 0 & \ldots & 0 \\
0 & 1 & 0 & \ldots & 0 \\
0 & 0 & 1 & \ldots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \ldots & 1
\end{pmatrix}
Only use this function within other functions to easily get the correct
identity matrix. If you always want either the full or the sparse
identity matrix, just use numpy's built-in np.identity function.
Examples
==========
The identity matrix generated from :math:`d = 3` yields the following
matrix:
.. math::
\mathbb{I}_3 = \begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}
>>> from toqito.matrices import iden
>>> iden(3)
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
It is also possible to create sparse identity matrices. The sparse identity
matrix generated from :math:`d = 10` yields the following matrix:
>>> from toqito.matrices import iden
>>> iden(10, True)
<10x10 sparse matrix of type '<class 'numpy.float64'>' with 10 stored
elements (1 diagonals) in DIAgonal format>
References
==========
.. [WIKID] Wikipedia: Identity matrix
https://en.wikipedia.org/wiki/Identity_matrix
:param dim: Integer representing dimension of identity matrix.
:param is_sparse: Whether or not the matrix is sparse.
:return: Sparse identity matrix of dimension :code:`dim`.
"""
if is_sparse:
id_mat = sparse.eye(dim)
else:
id_mat = np.identity(dim)
return id_mat
|
[
"numpy.identity",
"scipy.sparse.eye"
] |
[((2041, 2056), 'scipy.sparse.eye', 'sparse.eye', (['dim'], {}), '(dim)\n', (2051, 2056), False, 'from scipy import sparse\n'), ((2084, 2100), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (2095, 2100), True, 'import numpy as np\n')]
|
# coding:utf-8
# A simple Maya Python plug-in. By Jason (<EMAIL>), WeChat official account: WendyAndAndy
import sys
from maya.api import OpenMaya as om
def maya_useNewAPI():
pass
__VENDOR = '<EMAIL> | <EMAIL> | iJasonLee@WeChat'
__VERSION= '2018.08.08.01'
class HelloMaya(om.MPxCommand):
command = 'pyHello'
def __init__(self):
super(HelloMaya, self).__init__()
def doIt(self, args):
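        # The printed string mixes English and Chinese ("Chinese test: hello, oh my!") to exercise Unicode output.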
print(u'Hello, Maya. 中文测试:你好,妈呀!')
@staticmethod
def creator():
return HelloMaya()
def initializePlugin(obj):
plugin = om.MFnPlugin(obj, __VENDOR, __VERSION)
try:
plugin.registerCommand(HelloMaya.command, HelloMaya.creator)
except:
sys.stderr.write('register pyHello command failed')
raise
def uninitializePlugin(obj):
plugin = om.MFnPlugin(obj, __VENDOR, __VERSION)
try:
plugin.deregisterCommand(HelloMaya.command)
except:
sys.stderr.write('deregister pyHello command failed')
raise
|
[
"sys.stderr.write",
"maya.api.OpenMaya.MFnPlugin"
] |
[((534, 572), 'maya.api.OpenMaya.MFnPlugin', 'om.MFnPlugin', (['obj', '__VENDOR', '__VERSION'], {}), '(obj, __VENDOR, __VERSION)\n', (546, 572), True, 'from maya.api import OpenMaya as om\n'), ((781, 819), 'maya.api.OpenMaya.MFnPlugin', 'om.MFnPlugin', (['obj', '__VENDOR', '__VERSION'], {}), '(obj, __VENDOR, __VERSION)\n', (793, 819), True, 'from maya.api import OpenMaya as om\n'), ((671, 722), 'sys.stderr.write', 'sys.stderr.write', (['"""register pyHello command failed"""'], {}), "('register pyHello command failed')\n", (687, 722), False, 'import sys\n'), ((901, 954), 'sys.stderr.write', 'sys.stderr.write', (['"""deregister pyHello command failed"""'], {}), "('deregister pyHello command failed')\n", (917, 954), False, 'import sys\n')]
|
"""Routines for processing workouts."""
from typing import Union, Tuple, Optional
from xml.etree.ElementTree import (ElementTree, Element,
SubElement, tostring)
from lark import Lark
from lark import Transformer
class WorkoutTransformer(Transformer):
"""Class to process workout parse-trees."""
def duration(self, d: list) -> int:
"""Return duration in seconds."""
if d[1] == 'hrs' or d[1] == 'h':
# return duration in seconds
return int(d[0]*3600)
elif d[1] == 'min' or d[1] == 'm':
# return duration in seconds
return int(d[0]*60)
elif d[1] == 'sec' or d[1] == 's':
return int(d[0])
else:
# this should not happen
raise ValueError(f'Unexpected unit of time: {d[1]}')
def durations(self, d: list) -> int:
"""Return total duration."""
return sum(d)
def steady_state(self, s: list) -> dict:
"""Return steady-state."""
return dict(duration=s[0], power=s[1])
def ramp(self, s: list) -> dict:
"""Return ramp."""
return dict(duration=s[0], power=s[1])
def power(self, p: list[float]) -> Union[float, list]:
"""Return power."""
if len(p) == 1:
return p[0]
else:
return p
def repeats(self, r: list) -> Tuple[str, int]:
"""Return repeats."""
return 'repeats', r[0]
def intervals(self, i: list) -> Tuple[str, list]:
"""Return intervals."""
return 'intervals', i
def block(self, b: list) -> dict:
"""Return block."""
return dict(b)
INT = int
NUMBER = float
TIME_UNIT = str
workout = list
class ZWOG():
"""Zwift workout generator (ZWOG)."""
def __init__(self, workout: str,
author: str = ('Zwift workout generator '
'(https://github.com/tare/zwog)'),
name: str = 'Structured workout',
category: Optional[str] = None,
subcategory: Optional[str] = None):
"""Initialize ZWOG.
Args:
workout: Workout as a string.
author: Author.
name: Workout name.
category: Workout category.
subcategory: Workout subcategory.
"""
parser = Lark(r"""
workout: block*
block: [repeats "x"] intervals
intervals: (ramp|steady_state)~1 ("," (steady_state|ramp))*
steady_state: durations "@" steady_state_power "%" "FTP"
ramp: durations "from" ramp_power "%" "FTP"
durations: duration+
duration: NUMBER TIME_UNIT
time_unit: TIME_UNIT
TIME_UNIT: ("sec"|"s"|"min"|"m"|"hrs"|"h")
repeats: INT
steady_state_power: NUMBER -> power
ramp_power: NUMBER "to" NUMBER -> power
%ignore WS
%import common.WS
%import common.INT
%import common.NUMBER
""", start='workout')
self.__name = name
self.__author = author
self.__category = category
self.__subcategory = subcategory
# self.__tree_workout = parser.parse(workout)
self.__json_workout = (WorkoutTransformer()
.transform(parser.parse(workout)))
self.__pretty_workout = self._json_to_pretty(self.__json_workout)
self.__zwo_workout = self._json_to_zwo(self.__json_workout)
self.__tss = self._json_to_tss(self.__json_workout)
def save_zwo(self, filename) -> None:
"""Save the workout in the ZWO format.
Args:
filename: Filename.
"""
self.__zwo_workout.write(filename)
def __str__(self):
"""Return str."""
return self.__pretty_workout
@property
def tss(self) -> float:
"""Get TSS."""
return self.__tss
@property
def json_workout(self) -> list[dict]:
"""Return workout as JSON."""
return self.__json_workout
# @property
# def _tree_workout(self) -> str:
# """"""
# return self.__tree_workout.pretty()
@property
def zwo_workout(self) -> str:
"""Get the workout as ZWO."""
return tostring(self.__zwo_workout.getroot(),
encoding='unicode')+'\n'
def _is_ramp(self, block: dict) -> bool:
"""Tell whether the block is a ramp block.
Args:
block: Block.
Returns:
True if a ramp, False otherwise.
"""
return bool(len(block['intervals']) == 1 and
isinstance(block['intervals'][0]['power'],
list))
def _is_steady_state(self, block: dict) -> bool:
"""Tell whether the block is a steady-state block.
Args:
block: Block.
Returns:
True if a steady-state, False otherwise.
"""
return bool(len(block['intervals']) == 1 and not
isinstance(block['intervals'][0]['power'],
list))
def _is_intervalst(self, block: dict) -> bool:
"""Tell whether the block is an intervalst.
Args:
block: Block.
Returns:
            True if an intervalst, False otherwise.
"""
return bool(len(block['intervals']) == 2 and not
isinstance(block['intervals'][0]['power'], list) and not
isinstance(block['intervals'][1]['power'], list))
def _interval_to_xml(self, interval: dict,
repeats: int = 1) -> Element:
"""Return the interval as a XML node.
Args:
interval: The interval.
repeats: Number of repeats.
Returns:
XML node representing the interval.
"""
if not isinstance(interval, list):
if not isinstance(interval['power'], list): # steady-state
element = Element('SteadyState')
element.set('Duration', str(interval['duration']))
element.set('Power', str(interval['power']/100))
else: # ramp
element = Element('Ramp')
element.set('Duration', str(interval['duration']))
element.set('PowerLow', str(interval['power'][0]/100))
element.set('PowerHigh', str(interval['power'][1]/100))
else: # intervalst
element = Element('IntervalsT')
element.set('Repeat', str(repeats))
element.set('OnDuration', str(interval[0]['duration']))
element.set('OnPower', str(interval[0]['power']/100))
element.set('OffDuration', str(interval[1]['duration']))
element.set('OffPower', str(interval[1]['power']/100))
return element
def _json_to_zwo(self, blocks: list[dict]) -> ElementTree:
"""Convert JSON to ZWO.
See: https://github.com/h4l/zwift-workout-file-reference/blob/master/zwift_workout_file_tag_reference.md
Args:
blocks: Blocks.
Returns:
XML tree representing the workout.
""" # pylint: disable=line-too-long # noqa
root = Element('workout_file')
# fill metadata
for child, value in [('author', self.__author),
('name', self.__name),
('description',
('This workout was generated using ZWOG.\n\n'
f'{self._json_to_pretty(blocks)}')),
('sportType', 'bike'),
('category', self.__category),
('subcategory', self.__subcategory)]:
if value is not None:
tmp = SubElement(root, child)
tmp.text = value
tmp = SubElement(root, 'workout')
for block_idx, block in enumerate(blocks):
# warmup and ramp
if block_idx in [0, (len(blocks)-1)] and self._is_ramp(block):
element = self._interval_to_xml(block['intervals'][0])
if block_idx == 0:
element.tag = 'Warmup'
else:
element.tag = 'Cooldown'
tmp.append(element)
else:
# ramp or steady state
if self._is_ramp(block) or self._is_steady_state(block):
tmp.append(self._interval_to_xml(block['intervals'][0]))
else:
if 'repeats' in block:
repeats = block['repeats']
else:
repeats = 1
if self._is_intervalst(block): # intervalst
tmp.append(self._interval_to_xml(block['intervals'],
repeats=repeats))
else: # non intervalst
for _ in range(repeats):
for interval in block['intervals']:
tmp.append(self._interval_to_xml(interval))
tree = ElementTree(root)
return tree
def _duration_to_pretty_str(self, duration: int) -> str:
"""Prettify and stringify duration given in seconds.
Args:
duration: Duration in seconds.
Returns:
Prettified and stringified duration.
"""
pretty_str = ''
if int(duration/3600) > 0:
pretty_str += f'{int(duration/3600)}h'
if int((duration % 3600)/60) > 0:
pretty_str += f'{int((duration % 3600)/60)}m'
if duration % 60 > 0:
pretty_str += f'{int((duration % 60))}s'
return pretty_str
def _interval_to_str(self, interval: dict) -> str:
"""Return the interval as a string.
Args:
interval: Interval.
Returns:
String representation of the interval.
"""
if isinstance(interval['power'], list):
return (f'{self._duration_to_pretty_str(interval["duration"])}'
f' from {interval["power"][0]:.0f} to '
f'{interval["power"][1]:.0f}% FTP')
else:
return (f'{self._duration_to_pretty_str(interval["duration"])} '
f'@ {interval["power"]:.0f}% FTP')
def _interval_to_tss(self, interval: dict) -> float:
"""Calculate TSS for an interval.
Args:
interval: Interval.
Returns:
Calculated TSS.
"""
if isinstance(interval['power'], list):
min_power = min([interval['power'][0], interval['power'][1]])
max_power = max([interval['power'][0], interval['power'][1]])
tss = interval['duration']/3600*min_power
tss += interval['duration']/3600*(max_power-min_power)/2
else:
tss = interval['duration']/3600*interval['power']
return tss
def _json_to_pretty(self, blocks: list[dict]) -> str:
"""Return the workout as a string.
Args:
blocks (list[dict]): Workout.
Returns:
str: String representation of the workout.
"""
output = []
for block in blocks:
tmp = ''
if 'repeats' in block:
tmp = f'{block["repeats"]}x '
output.append(tmp + ', '.join([
self._interval_to_str(interval)
for interval in block['intervals']]))
return '\n'.join(output)
def _json_to_tss(self, blocks: list[dict]) -> float:
"""Calculate TSS for a workout.
Args:
blocks: Workout.
Returns:
float: Calculated TSS.
"""
tss = 0
for block in blocks:
# ramp or steady state
if self._is_ramp(block) or self._is_steady_state(block):
tss += self._interval_to_tss(block['intervals'][0])
else:
if 'repeats' in block:
repeats = block['repeats']
else:
repeats = 1
tss += sum([repeats*self._interval_to_tss(interval)
for interval in block['intervals']])
return tss
|
[
"xml.etree.ElementTree.ElementTree",
"lark.Lark",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.SubElement"
] |
[((2381, 3101), 'lark.Lark', 'Lark', (['"""\n workout: block*\n block: [repeats "x"] intervals\n intervals: (ramp|steady_state)~1 ("," (steady_state|ramp))*\n steady_state: durations "@" steady_state_power "%" "FTP"\n ramp: durations "from" ramp_power "%" "FTP"\n durations: duration+\n duration: NUMBER TIME_UNIT\n time_unit: TIME_UNIT\n TIME_UNIT: ("sec"|"s"|"min"|"m"|"hrs"|"h")\n repeats: INT\n steady_state_power: NUMBER -> power\n ramp_power: NUMBER "to" NUMBER -> power\n\n %ignore WS\n %import common.WS\n %import common.INT\n %import common.NUMBER\n """'], {'start': '"""workout"""'}), '(\n """\n workout: block*\n block: [repeats "x"] intervals\n intervals: (ramp|steady_state)~1 ("," (steady_state|ramp))*\n steady_state: durations "@" steady_state_power "%" "FTP"\n ramp: durations "from" ramp_power "%" "FTP"\n durations: duration+\n duration: NUMBER TIME_UNIT\n time_unit: TIME_UNIT\n TIME_UNIT: ("sec"|"s"|"min"|"m"|"hrs"|"h")\n repeats: INT\n steady_state_power: NUMBER -> power\n ramp_power: NUMBER "to" NUMBER -> power\n\n %ignore WS\n %import common.WS\n %import common.INT\n %import common.NUMBER\n """\n , start=\'workout\')\n', (2385, 3101), False, 'from lark import Lark\n'), ((7303, 7326), 'xml.etree.ElementTree.Element', 'Element', (['"""workout_file"""'], {}), "('workout_file')\n", (7310, 7326), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n'), ((7956, 7983), 'xml.etree.ElementTree.SubElement', 'SubElement', (['root', '"""workout"""'], {}), "(root, 'workout')\n", (7966, 7983), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n'), ((9242, 9259), 'xml.etree.ElementTree.ElementTree', 'ElementTree', (['root'], {}), '(root)\n', (9253, 9259), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n'), ((6553, 6574), 'xml.etree.ElementTree.Element', 'Element', (['"""IntervalsT"""'], {}), "('IntervalsT')\n", (6560, 6574), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n'), ((6070, 6092), 'xml.etree.ElementTree.Element', 'Element', (['"""SteadyState"""'], {}), "('SteadyState')\n", (6077, 6092), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n'), ((6277, 6292), 'xml.etree.ElementTree.Element', 'Element', (['"""Ramp"""'], {}), "('Ramp')\n", (6284, 6292), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n'), ((7884, 7907), 'xml.etree.ElementTree.SubElement', 'SubElement', (['root', 'child'], {}), '(root, child)\n', (7894, 7907), False, 'from xml.etree.ElementTree import ElementTree, Element, SubElement, tostring\n')]
|
from dataclasses import dataclass
from typing import Optional
@dataclass(frozen=True)
class RegistrantNotRegisteredError(Exception):
"""Raised when registrant name does not exist in the register"""
cls: type
registrant_name: str
register: Optional[dict]
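    # Build the exception message from the fields as soon as the dataclass is constructed.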
def __post_init__(self):
super().__init__(self._message())
def _message(self):
if self.register is None:
return self._empty_register_message()
return self._register_name_not_found_message()
def _prefix_message(self):
return f"Registrant name '{self.registrant_name}' is not found in the register of class '{self.cls.__name__}'"
def _empty_register_message(self):
return f"""
{self._prefix_message()} with an empty register.
"""
def _register_name_not_found_message(self):
return f"""
{self._prefix_message()} with registrant names {tuple(self.register.keys())}.
"""
|
[
"dataclasses.dataclass"
] |
[((65, 87), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (74, 87), False, 'from dataclasses import dataclass\n')]
|
import requests
SCHEMA_REGISTRY = "http://localhost:8081"
def subjects():
resp = requests.get(
f"{SCHEMA_REGISTRY}/subjects",
headers={"Content-Type": "application/json"}
)
resp.raise_for_status()
return resp.json()
# curl -X DELETE http://localhost:8081/subjects/com.udacity.station.arrivals-value
def delete_subject(subject):
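    # Delete the subject through the schema registry REST API; the response status is not checked here.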
resp = requests.delete(
f"{SCHEMA_REGISTRY}/subjects/{subject}"
)
def main():
for subject in subjects():
delete_subject(subject)
if __name__ == '__main__':
main()
|
[
"requests.delete",
"requests.get"
] |
[((87, 180), 'requests.get', 'requests.get', (['f"""{SCHEMA_REGISTRY}/subjects"""'], {'headers': "{'Content-Type': 'application/json'}"}), "(f'{SCHEMA_REGISTRY}/subjects', headers={'Content-Type':\n 'application/json'})\n", (99, 180), False, 'import requests\n'), ((390, 446), 'requests.delete', 'requests.delete', (['f"""{SCHEMA_REGISTRY}/subjects/{subject}"""'], {}), "(f'{SCHEMA_REGISTRY}/subjects/{subject}')\n", (405, 446), False, 'import requests\n')]
|
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: NumericDisplay.py 2696 2008-09-05 09:33:43Z graham.klyne $
#
# Widget class for a simple numeric display on a form
#
from urlparse import urljoin
from turbogears.widgets.base import Widget, CompoundWidget, WidgetsList
from turbogears.widgets.forms import FormField, Button
from EventLib.URI import EventBaseUri
SetNumericDisplayValueEvent = urljoin(EventBaseUri, "SetNumericDisplayValue")
SetNumericDisplayStateEvent = urljoin(EventBaseUri, "SetNumericDisplayState")
class NumericDisplay(FormField):
template = """
<span xmlns:py="http://purl.org/kid/ns#"
py:attrs="attrs"
class="${field_class}"
py:content="str(value)"
InitializeWidget="NumericDisplay_Init"
>
(NumericDisplay)
</span>
"""
params = ["attrs", "value_override"]
params_doc = {'attrs' : 'Dictionary containing extra (X)HTML attributes for'
' the numeric display tag'}
attrs = {}
def update_params(self, d):
super(NumericDisplay, self).update_params(d)
if self.is_named:
d['attrs']['name'] = d["name"]
d['attrs']['id'] = d["field_id"]
d['attrs']['SetNumericDisplayValueEvent'] = SetNumericDisplayValueEvent
d['attrs']['SetNumericDisplayStateEvent'] = SetNumericDisplayStateEvent
if d.get('value_override', None):
d['value'] = d['value_override']
# End.
|
[
"urlparse.urljoin"
] |
[((421, 468), 'urlparse.urljoin', 'urljoin', (['EventBaseUri', '"""SetNumericDisplayValue"""'], {}), "(EventBaseUri, 'SetNumericDisplayValue')\n", (428, 468), False, 'from urlparse import urljoin\n'), ((499, 546), 'urlparse.urljoin', 'urljoin', (['EventBaseUri', '"""SetNumericDisplayState"""'], {}), "(EventBaseUri, 'SetNumericDisplayState')\n", (506, 546), False, 'from urlparse import urljoin\n')]
|
import faiss
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score
from argparse import ArgumentParser
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch metric learning nmi script")
# Optional arguments for the launch helper
parser.add_argument("--num_workers", type=int, default=4,
help="The number of workers for eval")
parser.add_argument("--snap", type=str,
help="The snapshot to compute nmi")
parser.add_argument("--output", type=str, default="/data1/output/",
help="The output file")
parser.add_argument("--dataset", type=str, default="StanfordOnlineProducts",
help="The dataset for training")
parser.add_argument('--binarize', action='store_true')
return parser.parse_args()
def test_nmi(embeddings, labels, output_file):
unique_labels = np.unique(labels)
kmeans = KMeans(n_clusters=unique_labels.size, random_state=0, n_jobs=-1).fit(embeddings)
nmi = normalized_mutual_info_score(kmeans.labels_, labels)
print("NMI: {}".format(nmi))
return nmi
def test_nmi_faiss(embeddings, labels):
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
unique_labels = np.unique(labels)
d = embeddings.shape[1]
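    # Run GPU k-means with one centroid per ground-truth class, then score the assignment with NMI.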
kmeans = faiss.Clustering(d, unique_labels.size)
kmeans.verbose = True
kmeans.niter = 300
kmeans.nredo = 10
kmeans.seed = 0
index = faiss.GpuIndexFlatL2(res, d, flat_config)
kmeans.train(embeddings, index)
dists, pred_labels = index.search(embeddings, 1)
pred_labels = pred_labels.squeeze()
nmi = normalized_mutual_info_score(labels, pred_labels)
print("NMI: {}".format(nmi))
return nmi
if __name__ == '__main__':
args = parse_args()
embedding_file = args.snap.replace('.pth', '_embed.npy')
all_embeddings = np.load(embedding_file)
lable_file = args.snap.replace('.pth', '_label.npy')
all_labels = np.load(lable_file)
nmi = test_nmi_faiss(all_embeddings, all_labels)
|
[
"numpy.load",
"faiss.GpuIndexFlatL2",
"argparse.ArgumentParser",
"sklearn.cluster.KMeans",
"faiss.Clustering",
"faiss.GpuIndexFlatConfig",
"faiss.StandardGpuResources",
"numpy.unique",
"sklearn.metrics.cluster.normalized_mutual_info_score"
] |
[((298, 362), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""PyTorch metric learning nmi script"""'}), "(description='PyTorch metric learning nmi script')\n", (312, 362), False, 'from argparse import ArgumentParser\n'), ((1057, 1074), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1066, 1074), True, 'import numpy as np\n'), ((1180, 1232), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['kmeans.labels_', 'labels'], {}), '(kmeans.labels_, labels)\n', (1208, 1232), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((1334, 1362), 'faiss.StandardGpuResources', 'faiss.StandardGpuResources', ([], {}), '()\n', (1360, 1362), False, 'import faiss\n'), ((1381, 1407), 'faiss.GpuIndexFlatConfig', 'faiss.GpuIndexFlatConfig', ([], {}), '()\n', (1405, 1407), False, 'import faiss\n'), ((1456, 1473), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1465, 1473), True, 'import numpy as np\n'), ((1515, 1554), 'faiss.Clustering', 'faiss.Clustering', (['d', 'unique_labels.size'], {}), '(d, unique_labels.size)\n', (1531, 1554), False, 'import faiss\n'), ((1659, 1700), 'faiss.GpuIndexFlatL2', 'faiss.GpuIndexFlatL2', (['res', 'd', 'flat_config'], {}), '(res, d, flat_config)\n', (1679, 1700), False, 'import faiss\n'), ((1844, 1893), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['labels', 'pred_labels'], {}), '(labels, pred_labels)\n', (1872, 1893), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((2078, 2101), 'numpy.load', 'np.load', (['embedding_file'], {}), '(embedding_file)\n', (2085, 2101), True, 'import numpy as np\n'), ((2176, 2195), 'numpy.load', 'np.load', (['lable_file'], {}), '(lable_file)\n', (2183, 2195), True, 'import numpy as np\n'), ((1088, 1152), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'unique_labels.size', 'random_state': '(0)', 'n_jobs': '(-1)'}), '(n_clusters=unique_labels.size, random_state=0, n_jobs=-1)\n', (1094, 1152), False, 'from sklearn.cluster import KMeans\n')]
|
import asyncio
import time
import urllib.parse
from typing import Optional, Tuple
from asgiref.typing import WWWScope
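# Helpers for extracting peer/local address information from an asyncio transport.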
def get_remote_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]:
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
try:
info = socket_info.getpeername()
return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
except OSError:
# This case appears to inconsistently occur with uvloop
# bound to a unix domain socket.
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
return (str(info[0]), int(info[1]))
return None
def get_local_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]:
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
info = socket_info.getsockname()
return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
info = transport.get_extra_info("sockname")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
return (str(info[0]), int(info[1]))
return None
def is_ssl(transport: asyncio.Transport) -> bool:
return bool(transport.get_extra_info("sslcontext"))
def get_client_addr(scope: WWWScope) -> str:
client = scope.get("client")
if not client:
return ""
return "%s:%d" % client
def get_path_with_query_string(scope: WWWScope) -> str:
path_with_query_string = urllib.parse.quote(
scope.get("root_path", "") + scope["path"]
)
if scope["query_string"]:
path_with_query_string = "{}?{}".format(
path_with_query_string, scope["query_string"].decode("ascii")
)
return path_with_query_string
class RequestResponseTiming:
# XXX: switch to "time.perf_counter" because apparently on windows
    # time.monotonic is using GetTickCount64 which has ~15ms resolution (it
# caused problems in tests on windows)
#
# ref: https://github.com/python-trio/trio/issues/33#issue-202432431
def __init__(self) -> None:
self._request_start_time: Optional[float] = None
self._request_end_time: Optional[float] = None
self._response_start_time: Optional[float] = None
self._response_end_time: Optional[float] = None
def request_started(self) -> None:
self._request_start_time = time.monotonic()
@property
def request_start_time(self) -> float:
if self._request_start_time is None:
raise ValueError("request_started() was not called")
return self._request_start_time
def request_ended(self) -> None:
self._request_end_time = time.monotonic()
@property
def request_end_time(self) -> float:
if self._request_end_time is None:
raise ValueError("request_ended() was not called")
return self._request_end_time
def response_started(self) -> None:
self._response_start_time = time.monotonic()
@property
def response_start_time(self) -> float:
if self._response_start_time is None:
raise ValueError("response_started() was not called")
return self._response_start_time
def response_ended(self) -> None:
self._response_end_time = time.monotonic()
@property
def response_end_time(self) -> float:
if self._response_end_time is None:
raise ValueError("response_ended() was not called")
return self._response_end_time
def request_duration_seconds(self) -> float:
return self.request_end_time - self.request_start_time
def response_duration_seconds(self) -> float:
return self.response_end_time - self.response_start_time
def total_duration_seconds(self) -> float:
return self.response_end_time - self.request_start_time
|
[
"time.monotonic"
] |
[((2503, 2519), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2517, 2519), False, 'import time\n'), ((2799, 2815), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2813, 2815), False, 'import time\n'), ((3093, 3109), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3107, 3109), False, 'import time\n'), ((3395, 3411), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3409, 3411), False, 'import time\n')]
|
#!/usr/bin/env python
# $Id$
"""2 solutions"""
import puzzler
from puzzler.puzzles.pentominoes import Pentominoes3x20Loop
puzzler.run(Pentominoes3x20Loop)
|
[
"puzzler.run"
] |
[((125, 157), 'puzzler.run', 'puzzler.run', (['Pentominoes3x20Loop'], {}), '(Pentominoes3x20Loop)\n', (136, 157), False, 'import puzzler\n')]
|
# -*- coding: utf-8 -*-
# @Author: ahpalmerUNR
# @Date: 2020-12-21 14:38:59
# @Last Modified by: ahpalmerUNR
# @Last Modified time: 2021-05-06 23:10:20
import torch
class MouthMusicMouthModel(torch.nn.Module):
def __init__(self):
super(MouthMusicMouthModel,self).__init__()
self.conv1 = torch.nn.Conv2d(3,62,7,stride=2,padding=3,bias=False)
self.batch1 = torch.nn.BatchNorm2d(62)
self.relu = torch.nn.ReLU(inplace=True)
self.maxPool1 = torch.nn.MaxPool2d(3,2,1,1)
self.conv2 = torch.nn.Conv2d(62,120,7,stride=2,padding=3,bias=False)
self.batch2 = torch.nn.BatchNorm2d(120)
self.conv3 = torch.nn.Conv2d(120,120,7,stride=2,padding=3,bias=False)
self.batch3 = torch.nn.BatchNorm2d(120)
self.maxPool2 = torch.nn.MaxPool2d(3,1,1,1)
self.conv4 = torch.nn.Conv2d(120,120,7,stride=1,padding=3,bias=False)
self.batch4 = torch.nn.BatchNorm2d(120)
self.conv5 = torch.nn.Conv2d(120,120,7,stride=1,padding=3,bias=False)
self.batch5 = torch.nn.BatchNorm2d(120)
self.conv6 = torch.nn.Conv2d(120,30,7,stride=2,padding=3,bias=False)
self.conv7 = torch.nn.Conv2d(30,12,1)
self.sigmoid = torch.nn.Sigmoid()
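        # A 1x1 convolution head (12 channels) followed by a sigmoid keeps every output in [0, 1].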
def forward(self,x):
out = self.relu(self.batch1(self.conv1(x)))
out = self.maxPool1(out)
out = self.relu(self.batch2(self.conv2(out)))
out = self.batch3(self.conv3(out))
out = self.maxPool2(out)
out = self.relu(self.batch4(self.conv4(out)))
out = self.relu(self.batch5(self.conv5(out)))
out = self.relu(self.conv6(out))
out = self.sigmoid(self.conv7(out))
return out
class MouthMusicEyeModel(torch.nn.Module):
def __init__(self):
super(MouthMusicEyeModel,self).__init__()
self.conv1 = torch.nn.Conv2d(3,64,7,stride=2,padding=3,bias=False)
self.batch1 = torch.nn.BatchNorm2d(64)
self.relu = torch.nn.ReLU(inplace=True)
self.maxPool1 = torch.nn.MaxPool2d(3,2,1,1)
self.conv2 = torch.nn.Conv2d(64,120,7,stride=2,padding=3,bias=False)
self.batch2 = torch.nn.BatchNorm2d(120)
self.conv3 = torch.nn.Conv2d(120,120,7,stride=2,padding=3,bias=False)
self.batch3 = torch.nn.BatchNorm2d(120)
self.maxPool2 = torch.nn.MaxPool2d(3,1,1,1)
self.conv7 = torch.nn.Conv2d(120,16,1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self,x):
out = self.relu(self.batch1(self.conv1(x)))
out = self.maxPool1(out)
out = self.relu(self.batch2(self.conv2(out)))
out = self.batch3(self.conv3(out))
out = self.maxPool2(out)
out = self.sigmoid(self.conv7(out))
return out
def loadModelMouth(directory,nameRoot):
modelMouth = MouthMusicMouthModel()
modelMouth.load_state_dict(torch.load(directory+"mouth_"+nameRoot, map_location="cpu"))
return modelMouth
def loadModelEyes(directory,nameRoot):
modelEyes = MouthMusicEyeModel()
modelEyes.load_state_dict(torch.load(directory+"eye_"+nameRoot, map_location="cpu"))
return modelEyes
def loadModel(directory,nameRoot):
eyeModel = loadModelEyes(directory,nameRoot)
mouthModel = loadModelMouth(directory,nameRoot)
return mouthModel,eyeModel
|
[
"torch.nn.ReLU",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d",
"torch.nn.Sigmoid"
] |
[((297, 355), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(3)', '(62)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 62, 7, stride=2, padding=3, bias=False)\n', (312, 355), False, 'import torch\n'), ((367, 391), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(62)'], {}), '(62)\n', (387, 391), False, 'import torch\n'), ((406, 433), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (419, 433), False, 'import torch\n'), ((452, 482), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(3)', '(2)', '(1)', '(1)'], {}), '(3, 2, 1, 1)\n', (470, 482), False, 'import torch\n'), ((495, 555), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(62)', '(120)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(62, 120, 7, stride=2, padding=3, bias=False)\n', (510, 555), False, 'import torch\n'), ((567, 592), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(120)'], {}), '(120)\n', (587, 592), False, 'import torch\n'), ((608, 669), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(120)', '(120)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(120, 120, 7, stride=2, padding=3, bias=False)\n', (623, 669), False, 'import torch\n'), ((681, 706), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(120)'], {}), '(120)\n', (701, 706), False, 'import torch\n'), ((725, 755), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(3)', '(1)', '(1)', '(1)'], {}), '(3, 1, 1, 1)\n', (743, 755), False, 'import torch\n'), ((768, 829), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(120)', '(120)', '(7)'], {'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(120, 120, 7, stride=1, padding=3, bias=False)\n', (783, 829), False, 'import torch\n'), ((841, 866), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(120)'], {}), '(120)\n', (861, 866), False, 'import torch\n'), ((882, 943), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(120)', '(120)', '(7)'], {'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(120, 120, 7, stride=1, padding=3, bias=False)\n', (897, 943), False, 'import torch\n'), ((955, 980), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(120)'], {}), '(120)\n', (975, 980), False, 'import torch\n'), ((996, 1056), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(120)', '(30)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(120, 30, 7, stride=2, padding=3, bias=False)\n', (1011, 1056), False, 'import torch\n'), ((1067, 1093), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(30)', '(12)', '(1)'], {}), '(30, 12, 1)\n', (1082, 1093), False, 'import torch\n'), ((1109, 1127), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (1125, 1127), False, 'import torch\n'), ((1644, 1702), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(3)', '(64)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, 7, stride=2, padding=3, bias=False)\n', (1659, 1702), False, 'import torch\n'), ((1714, 1738), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1734, 1738), False, 'import torch\n'), ((1753, 1780), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1766, 1780), False, 'import torch\n'), ((1799, 1829), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(3)', '(2)', '(1)', '(1)'], {}), '(3, 2, 1, 1)\n', (1817, 1829), False, 'import torch\n'), ((1842, 1902), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(64)', '(120)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(64, 120, 7, stride=2, padding=3, bias=False)\n', (1857, 1902), False, 'import torch\n'), ((1914, 
1939), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(120)'], {}), '(120)\n', (1934, 1939), False, 'import torch\n'), ((1955, 2016), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(120)', '(120)', '(7)'], {'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(120, 120, 7, stride=2, padding=3, bias=False)\n', (1970, 2016), False, 'import torch\n'), ((2028, 2053), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(120)'], {}), '(120)\n', (2048, 2053), False, 'import torch\n'), ((2072, 2102), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(3)', '(1)', '(1)', '(1)'], {}), '(3, 1, 1, 1)\n', (2090, 2102), False, 'import torch\n'), ((2115, 2142), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(120)', '(16)', '(1)'], {}), '(120, 16, 1)\n', (2130, 2142), False, 'import torch\n'), ((2158, 2176), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (2174, 2176), False, 'import torch\n'), ((2543, 2606), 'torch.load', 'torch.load', (["(directory + 'mouth_' + nameRoot)"], {'map_location': '"""cpu"""'}), "(directory + 'mouth_' + nameRoot, map_location='cpu')\n", (2553, 2606), False, 'import torch\n'), ((2724, 2785), 'torch.load', 'torch.load', (["(directory + 'eye_' + nameRoot)"], {'map_location': '"""cpu"""'}), "(directory + 'eye_' + nameRoot, map_location='cpu')\n", (2734, 2785), False, 'import torch\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
class SpectrogramModel(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size_cnn, stride_cnn, padding_cnn, kernel_size_pool, stride_pool, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(SpectrogramModel, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size_cnn = kernel_size_cnn
self.stride_cnn = stride_cnn
self.padding_cnn = padding_cnn
self.kernel_size_pool = kernel_size_pool
self.stride_pool = stride_pool
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.cnn1 = nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch1 = nn.BatchNorm2d(self.out_channels)
self.cnn2 = nn.Conv2d(self.out_channels, self.out_channels, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch2 = nn.BatchNorm2d(self.out_channels)
self.cnn3 = nn.Conv2d(self.out_channels, self.out_channels*2, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch3 = nn.BatchNorm2d(self.out_channels*2)
self.cnn4 = nn.Conv2d(self.out_channels*2, self.out_channels*2, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)
self.batch4 = nn.BatchNorm2d(self.out_channels*2)
self.relu = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(self.kernel_size_pool//2, stride=self.stride_pool//2)
self.max_pool = nn.MaxPool2d(self.kernel_size_pool, stride=self.stride_pool)
self.max_pool4 = nn.MaxPool2d(int(self.kernel_size_pool*5/4), stride=int(self.stride_pool*5/4))
self.lstm = nn.LSTM(int(640/160) * int(480/160), self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, train=True, multi_gpu=False):
input = input.to(self.device)
target = target.to(self.device)
out = self.cnn1(input)
#print(out.shape)
out = self.batch1(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool1(out)
#print(out.shape)
out = self.cnn2(out)
#print(out.shape)
out = self.batch2(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool(out)
#print(out.shape)
out = self.cnn3(out)
#print(out.shape)
out = self.batch3(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool(out)
#print(out.shape)
out = self.cnn4(out)
#print(out.shape)
out = self.batch4(out)
#print(out.shape)
out = self.relu(out)
#print(out.shape)
out = self.max_pool4(out)
#print(out.shape)
#out = torch.flatten(out, start_dim=2, end_dim=3)
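        # Flatten H and W into a single feature axis; the channel axis becomes the LSTM sequence dimension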
out = out.view(list(out.size())[0], list(out.size())[1], -1)
#pdb.set_trace()
out, hn = self.lstm(out)
# print(out.shape)
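        # Mean-pool the LSTM outputs over the sequence dimension before the linear classifier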
out = torch.mean(out, dim=1)
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
if multi_gpu:
out=torch.unsqueeze(out, dim=0)
loss=torch.unsqueeze(loss,dim=0)
return out, loss
|
[
"torch.mean",
"torch.nn.ReLU",
"torch.unsqueeze",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.max",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
] |
[((1211, 1244), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.out_channels'], {}), '(self.out_channels)\n', (1225, 1244), True, 'import torch.nn as nn\n'), ((1423, 1456), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.out_channels'], {}), '(self.out_channels)\n', (1437, 1456), True, 'import torch.nn as nn\n'), ((1637, 1674), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.out_channels * 2)'], {}), '(self.out_channels * 2)\n', (1651, 1674), True, 'import torch.nn as nn\n'), ((1855, 1892), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.out_channels * 2)'], {}), '(self.out_channels * 2)\n', (1869, 1892), True, 'import torch.nn as nn\n'), ((1911, 1920), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1918, 1920), True, 'import torch.nn as nn\n'), ((1946, 2016), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(self.kernel_size_pool // 2)'], {'stride': '(self.stride_pool // 2)'}), '(self.kernel_size_pool // 2, stride=self.stride_pool // 2)\n', (1958, 2016), True, 'import torch.nn as nn\n'), ((2037, 2097), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['self.kernel_size_pool'], {'stride': 'self.stride_pool'}), '(self.kernel_size_pool, stride=self.stride_pool)\n', (2049, 2097), True, 'import torch.nn as nn\n'), ((3806, 3828), 'torch.mean', 'torch.mean', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (3816, 3828), False, 'import torch\n'), ((3970, 3997), 'torch.unsqueeze', 'torch.unsqueeze', (['out'], {'dim': '(0)'}), '(out, dim=0)\n', (3985, 3997), False, 'import torch\n'), ((4015, 4043), 'torch.unsqueeze', 'torch.unsqueeze', (['loss'], {'dim': '(0)'}), '(loss, dim=0)\n', (4030, 4043), False, 'import torch\n'), ((416, 441), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (439, 441), False, 'import torch\n'), ((1054, 1177), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.in_channels', 'self.out_channels', 'self.kernel_size_cnn'], {'stride': 'self.stride_cnn', 'padding': 'self.padding_cnn'}), '(self.in_channels, self.out_channels, self.kernel_size_cnn, stride\n =self.stride_cnn, padding=self.padding_cnn)\n', (1063, 1177), True, 'import torch.nn as nn\n'), ((1265, 1388), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.out_channels', 'self.out_channels', 'self.kernel_size_cnn'], {'stride': 'self.stride_cnn', 'padding': 'self.padding_cnn'}), '(self.out_channels, self.out_channels, self.kernel_size_cnn,\n stride=self.stride_cnn, padding=self.padding_cnn)\n', (1274, 1388), True, 'import torch.nn as nn\n'), ((1477, 1604), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.out_channels', '(self.out_channels * 2)', 'self.kernel_size_cnn'], {'stride': 'self.stride_cnn', 'padding': 'self.padding_cnn'}), '(self.out_channels, self.out_channels * 2, self.kernel_size_cnn,\n stride=self.stride_cnn, padding=self.padding_cnn)\n', (1486, 1604), True, 'import torch.nn as nn\n'), ((1693, 1825), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.out_channels * 2)', '(self.out_channels * 2)', 'self.kernel_size_cnn'], {'stride': 'self.stride_cnn', 'padding': 'self.padding_cnn'}), '(self.out_channels * 2, self.out_channels * 2, self.\n kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn)\n', (1702, 1825), True, 'import torch.nn as nn\n'), ((2445, 2510), 'torch.nn.Linear', 'nn.Linear', (['(self.hidden_dim * self.num_directions)', 'self.num_labels'], {}), '(self.hidden_dim * self.num_directions, self.num_labels)\n', (2454, 2510), True, 'import torch.nn as nn\n'), ((3906, 3926), 'torch.max', 'torch.max', (['target', '(1)'], {}), '(target, 1)\n', (3915, 3926), False, 'import torch\n')]
|
import os
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
from matplotlib.colors import BASE_COLORS, SymLogNorm
from scipy.stats import ttest_ind
from swann.preprocessing import get_info
from swann.utils import get_config, derivative_fname
from swann.analyses import decompose_tfr, find_bursts, get_bursts
from mne.viz import iter_topography
from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR
from mne import Epochs, EvokedArray
def plot_spectrogram(rawf, raw, event, events, bl_events,
method='raw', baseline='z-score',
freqs=np.logspace(np.log(4), np.log(250), 50, base=np.e),
n_cycles=7, use_fft=True, ncols=3, plot_erp=True,
plot_bursts=False, picks=None, verbose=True,
overwrite=False):
    ''' Plots time-frequency spectrograms of the data around an event.
Parameters
----------
rawf : pybids.BIDSlayout file
The object containing the raw data.
raw : mne.io.Raw
The raw data object.
event : str
The name of the event (e.g. `Response`).
events : np.array(n_events, 3)
The events from mne.events_from_annotations or mne.find_events
corresponding to the event and trials that are described by the name.
bl_events: np.array(n_events, 3)
The events from mne.events_from_annotations or mne.find_events
corresponding to the baseline for the event and trials
that are described by the name.
    method : `raw` | `phase-locked` | `non-phase-locked` | `total`
How to plot the spectrograms:
raw -- plot without averaging power (default)
phase-locked -- just average the event-related potential (ERP)
non-phase-locked -- subtract the ERP from each epoch, do time
frequency decomposition (TFR) then average
total -- do TFR on each epoch and then average
baseline : `z-score` | `gain`
        How to baseline spectrogram data:
z-score -- for each frequency, subtract the median and divide
by the standard deviation (default)
gain -- divide by median
freqs : np.array
The frequencies over which to compute the spectral data.
n_cycles : int, np.array
The number of cycles to use in the Morlet transform
use_fft : bool
Use Fast Fourier Transform see `mne.time_frequency.tfr.cwt`.
ncols : int
        The number of columns to use in the plot (for `method=raw`).
plot_erp : bool
Whether to plot the event-related potential on top.
plot_bursts : bool
Whether to include vertical bars for when the bursts are detected
(for `method=raw`).
picks : None | list of str
The names of the channels to plot
'''
config = get_config()
raw = raw.copy()
raw.load_data()
if method not in ('raw', 'phase-locked', 'non-phase-locked', 'total'):
raise ValueError('Unrecognized method {}'.format(method))
if picks is None:
picks = raw.ch_names
else:
if isinstance(picks, str):
picks = [picks]
raw = raw.pick_channels(picks)
if method == 'raw' and len(picks) > 1:
raise ValueError('Only one channel can be plotted at a time '
'for raw spectrograms')
plotf = derivative_fname(rawf, 'plots/spectrograms',
'event-{}_spectrogram_{}_{}_power'.format(
event, method, baseline),
config['fig'])
if op.isfile(plotf) and not overwrite:
print('Spectrogram plot for {} already exists, '
'use `overwrite=True` to replot'.format(event))
return
if method == 'raw' and plot_bursts:
bursts = find_bursts(rawf, return_saved=True)
if isinstance(n_cycles, np.ndarray) and len(freqs) != len(n_cycles):
raise ValueError('Mismatch lengths n_cycles {} to freqs {}'.format(
n_cycles, freqs))
epochs = Epochs(raw, events, tmin=config['tmin'] - 1, baseline=None,
tmax=config['tmax'] + 1, preload=True)
# align baseline events with epochs with enough events
bl_events = np.delete(bl_events, [i for i, e in enumerate(bl_events[:, 2])
if e not in epochs.events[:, 2]], axis=0)
bl_epochs = Epochs(raw, bl_events, tmin=config['baseline_tmin'] - 1,
baseline=None, tmax=config['baseline_tmax'] + 1,
preload=True)
cropped_epochs = epochs.copy().crop(tmin=config['tmin'],
tmax=config['tmax'])
cropped_bl_epochs = bl_epochs.copy().crop(
tmin=config['baseline_tmin'], tmax=config['baseline_tmax'])
if method == 'phase-locked':
bl_evoked = EvokedArray(np.median(bl_epochs._data, axis=0),
info=bl_epochs.info, tmin=bl_epochs.tmin,
nave=len(bl_epochs))
bl_evoked_tfr = tfr_morlet(bl_evoked, freqs, n_cycles=n_cycles,
use_fft=use_fft, return_itc=False)
bl_evoked_tfr.crop(tmin=config['baseline_tmin'],
tmax=config['baseline_tmax'])
evoked = EvokedArray(np.median(epochs._data, axis=0),
info=epochs.info, tmin=epochs.tmin,
nave=len(epochs))
evoked_tfr = tfr_morlet(evoked, freqs, n_cycles=n_cycles,
use_fft=use_fft, return_itc=False)
evoked_tfr.crop(tmin=config['tmin'], tmax=config['tmax'])
evoked_tfr.data = \
evoked_tfr.data - np.median(bl_evoked_tfr.data,
axis=2)[:, :, np.newaxis]
evoked_tfr.data /= np.std(bl_evoked_tfr.data, axis=2)[:, :, np.newaxis]
else:
if method == 'non-phase-locked':
epochs._data -= np.median(epochs._data, axis=0)
epochs_data = np.zeros((len(epochs), len(epochs.ch_names), len(freqs),
len(cropped_epochs.times)))
bl_epochs_data = np.zeros((len(bl_epochs), len(bl_epochs.ch_names),
len(freqs), len(cropped_bl_epochs.times)))
epochs_tfr = EpochsTFR(epochs.info, epochs_data, cropped_epochs.times,
freqs, verbose=False)
bl_epochs_tfr = EpochsTFR(bl_epochs.info, bl_epochs_data,
cropped_bl_epochs.times, freqs,
verbose=False)
if method != 'raw':
evoked_tfr_data = np.zeros((len(epochs.ch_names), len(freqs),
len(cropped_epochs.times)))
evoked_tfr = AverageTFR(epochs.info, evoked_tfr_data,
cropped_epochs.times, freqs,
nave=len(epochs))
for i, ch in enumerate(epochs.ch_names):
if verbose:
print('\nComputing TFR ({}/{}) for {}... '
'Computing frequency'.format(i, len(epochs.ch_names),
ch), end=' ', flush=True) # noqa
this_epochs = epochs.copy().pick_channels([ch])
this_bl_epochs = bl_epochs.copy().pick_channels([ch])
for j, freq in enumerate(freqs):
if verbose:
print('{:.2f}'.format(freq), end=' ', flush=True)
this_n_cycles = (n_cycles if isinstance(n_cycles, int) else
                             n_cycles[j])  # n_cycles is aligned with freqs, so index by frequency
this_bl_epochs_tfr = \
tfr_morlet(this_bl_epochs, [freq], n_cycles=this_n_cycles,
use_fft=use_fft, average=False,
return_itc=False, verbose=False)
this_bl_epochs_tfr = this_bl_epochs_tfr.crop(
tmin=config['baseline_tmin'], tmax=config['baseline_tmax'])
this_epochs_tfr = \
tfr_morlet(this_epochs, [freq], n_cycles=this_n_cycles,
use_fft=use_fft, average=False,
return_itc=False, verbose=False)
this_epochs_tfr = this_epochs_tfr.crop(
tmin=config['tmin'], tmax=config['tmax'])
full_data = np.concatenate([this_bl_epochs_tfr.data,
this_epochs_tfr.data], axis=3)
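                # z-score this channel/frequency using the median and std of the concatenated baseline + event data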
epochs_tfr.data[:, i:i + 1, j:j + 1, :] = this_epochs_tfr.data
epochs_tfr.data[:, i:i + 1, j:j + 1, :] -= \
np.median(full_data, axis=3)[:, :, :, np.newaxis]
epochs_tfr.data[:, i:i + 1, j:j + 1, :] /= \
np.std(full_data, axis=3)[:, :, :, np.newaxis]
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] = \
this_bl_epochs_tfr.data
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] -= \
np.median(full_data, axis=3)[:, :, :, np.newaxis]
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] /= \
np.std(full_data, axis=3)[:, :, :, np.newaxis]
if method != 'raw':
this_evoked_tfr = np.median(epochs_tfr.data[:, i, j],
axis=0)
this_bl_evoked_tfr = np.median(bl_epochs_tfr.data[:, i, j],
axis=0)
evoked_tfr.data[i, j] = \
this_evoked_tfr - np.median(this_bl_evoked_tfr)
evoked_tfr.data[i, j] /= np.std(this_bl_evoked_tfr)
if method == 'raw':
ch_name = epochs_tfr.ch_names[0]
vmin, vmax = np.min(epochs_tfr.data), np.max(epochs_tfr.data)
emin, emax = np.min(cropped_epochs._data), np.max(cropped_epochs._data)
if verbose:
print('Plotting spectrogram for channel {}'.format(ch_name))
if plot_bursts:
n_bursts = len(bursts[bursts['channel'] == ch_name])
print('{} bursts for this channel total'.format(n_bursts))
nrows = int(np.ceil(len(events) / ncols))
fig, axes = plt.subplots(nrows, ncols)
fig.set_size_inches(ncols, nrows)
axes = axes.flatten()
for j, this_tfr in enumerate(epochs_tfr):
evoked_data = (cropped_epochs._data[j, 0], emin, emax)
cmap = _plot_spectrogram(
axes[j], this_tfr[i], epochs_tfr.times,
vmin, vmax, freqs, evoked_data,
show_xticks=j >= len(events) - ncols,
show_yticks=j % ncols == 0,
show_ylabel=j == int(nrows / 2) * ncols)
if plot_bursts:
_plot_bursts(config, events, raw, bursts, j, axes, ch_name)
for ax in axes[len(epochs_tfr):]:
ax.axis('off')
else:
if plot_erp:
evoked_data = np.median(cropped_epochs._data, axis=0)
evoked_data -= np.median(evoked_data, axis=1)[:, np.newaxis]
evoked = EvokedArray(evoked_data, info=epochs.info,
tmin=epochs.tmin, nave=len(epochs))
emin, emax = np.min(evoked.data), np.max(evoked.data)
vmin, vmax = np.min(evoked_tfr.data), np.max(evoked_tfr.data)
if raw.info['dig'] is None:
nrows = int(len(raw.ch_names) ** 0.5)
ncols = int(len(raw.ch_names) / nrows) + 1
fig, axes = plt.subplots(nrows, ncols)
fig.set_size_inches(12, 8)
axes = axes.flatten()
for idx, ax in enumerate(axes):
if idx < len(picks):
cmap = _plot_spectrogram(
ax, evoked_tfr.data[idx], evoked_tfr.times,
vmin, vmax, freqs, ((evoked.data[idx], emin, emax) if
plot_erp else None),
show_xticks=idx >= len(picks) - ncols,
show_yticks=idx % ncols == 0,
show_ylabel=idx == int(nrows / 2) * ncols)
ax.set_title(raw.ch_names[idx])
else:
ax.axis('off')
else:
for ax, idx in iter_topography(raw.info, fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white'):
cmap = _plot_spectrogram(
                    ax, evoked_tfr.data[idx], evoked_tfr.times, vmin, vmax, freqs,
((evoked.data[idx], emin, emax) if plot_erp else None))
fig.subplots_adjust(right=0.85, hspace=0.3)
cax = fig.add_subplot(position=[0.87, 0.1, 0.05, 0.8])
cax = fig.colorbar(cmap, cax=cax, format='{:.2f}',
ticks=[vmin, vmin / 10, vmin / 100,
vmax / 100, vmax / 10, vmax])
cax.set_label(('Log {} Power {} Normalized'.format(method, baseline)
).title())
fig.suptitle('Time Frequency Decomposition for the {} '
'Event, {} Power'.format(event, baseline.capitalize()))
fig.savefig(plotf, dpi=300)
plt.close(fig)
def _plot_spectrogram(ax, this_tfr, times, vmin, vmax,
freqs, evoked_data, show_yticks=True,
show_ylabel=True, show_xticks=True):
'''Plot a single spectrogram'''
cmap = ax.imshow(this_tfr, cmap='RdYlBu_r', aspect='auto',
extent=[0, this_tfr.shape[1], 0, this_tfr.shape[0]],
norm=SymLogNorm(linthresh=(vmax - vmin) / 100,
vmin=vmin, vmax=vmax))
if evoked_data is not None:
evoked, emin, emax = evoked_data
ax2 = ax.twinx()
ax2.set_yticks([])
ax2.plot(range(this_tfr.shape[1]), evoked, alpha=0.25, color='k')
ax2.set_ylim([emin, emax])
ax.invert_yaxis()
if show_yticks:
ax.set_yticks(np.linspace(0, len(freqs), 5))
ax.set_yticklabels(['{:.2f}'.format(f) for f in
freqs[::-int(len(freqs) / 5)]])
else:
ax.set_yticklabels([])
if show_ylabel:
ax.set_ylabel('Frequency (Hz)')
ax.axvline(np.where(times == 0)[0][0], color='k')
if show_xticks:
ax.set_xlabel('Time (s)')
ax.set_xticks(np.linspace(0, len(times), 3))
ax.set_xticklabels(['{:.1f}'.format(t) for t in
np.linspace(times[0], times[-1], 3)])
else:
ax.set_xticks([])
return cmap
def _plot_bursts(config, events, raw, bursts, j, axes, ch_name):
'''Plot bursts on a single spectrogram'''
min_idx = events[j, 0] + raw.info['sfreq'] * config['tmin']
max_idx = events[j, 0] + raw.info['sfreq'] * config['tmax']
these_bursts = bursts[(bursts['channel'] == ch_name) &
(bursts['burst_end'] > min_idx) &
(bursts['burst_start'] < max_idx)]
if these_bursts.size > 0:
for burst_idx in these_bursts.index:
for start_stop in ['burst_start', 'burst_end']:
if (max_idx > these_bursts.loc[burst_idx,
start_stop] >
min_idx):
axes[j].axvline(
x=these_bursts.loc[burst_idx,
start_stop] - min_idx,
color='green')
|
[
"numpy.log",
"swann.utils.get_config",
"mne.viz.iter_topography",
"mne.time_frequency.EpochsTFR",
"matplotlib.pyplot.close",
"numpy.median",
"numpy.std",
"numpy.concatenate",
"mne.Epochs",
"os.path.isfile",
"numpy.min",
"numpy.max",
"mne.time_frequency.tfr_morlet",
"swann.analyses.find_bursts",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.colors.SymLogNorm"
] |
[((2836, 2848), 'swann.utils.get_config', 'get_config', ([], {}), '()\n', (2846, 2848), False, 'from swann.utils import get_config, derivative_fname\n'), ((4064, 4167), 'mne.Epochs', 'Epochs', (['raw', 'events'], {'tmin': "(config['tmin'] - 1)", 'baseline': 'None', 'tmax': "(config['tmax'] + 1)", 'preload': '(True)'}), "(raw, events, tmin=config['tmin'] - 1, baseline=None, tmax=config[\n 'tmax'] + 1, preload=True)\n", (4070, 4167), False, 'from mne import Epochs, EvokedArray\n'), ((4405, 4528), 'mne.Epochs', 'Epochs', (['raw', 'bl_events'], {'tmin': "(config['baseline_tmin'] - 1)", 'baseline': 'None', 'tmax': "(config['baseline_tmax'] + 1)", 'preload': '(True)'}), "(raw, bl_events, tmin=config['baseline_tmin'] - 1, baseline=None,\n tmax=config['baseline_tmax'] + 1, preload=True)\n", (4411, 4528), False, 'from mne import Epochs, EvokedArray\n'), ((13287, 13301), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13296, 13301), True, 'import matplotlib.pyplot as plt\n'), ((624, 633), 'numpy.log', 'np.log', (['(4)'], {}), '(4)\n', (630, 633), True, 'import numpy as np\n'), ((635, 646), 'numpy.log', 'np.log', (['(250)'], {}), '(250)\n', (641, 646), True, 'import numpy as np\n'), ((3595, 3611), 'os.path.isfile', 'op.isfile', (['plotf'], {}), '(plotf)\n', (3604, 3611), True, 'import os.path as op\n'), ((3822, 3858), 'swann.analyses.find_bursts', 'find_bursts', (['rawf'], {'return_saved': '(True)'}), '(rawf, return_saved=True)\n', (3833, 3858), False, 'from swann.analyses import decompose_tfr, find_bursts, get_bursts\n'), ((5060, 5147), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['bl_evoked', 'freqs'], {'n_cycles': 'n_cycles', 'use_fft': 'use_fft', 'return_itc': '(False)'}), '(bl_evoked, freqs, n_cycles=n_cycles, use_fft=use_fft, return_itc\n =False)\n', (5070, 5147), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((5487, 5566), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['evoked', 'freqs'], {'n_cycles': 'n_cycles', 'use_fft': 'use_fft', 'return_itc': '(False)'}), '(evoked, freqs, n_cycles=n_cycles, use_fft=use_fft, return_itc=False)\n', (5497, 5566), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((6324, 6403), 'mne.time_frequency.EpochsTFR', 'EpochsTFR', (['epochs.info', 'epochs_data', 'cropped_epochs.times', 'freqs'], {'verbose': '(False)'}), '(epochs.info, epochs_data, cropped_epochs.times, freqs, verbose=False)\n', (6333, 6403), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((6459, 6551), 'mne.time_frequency.EpochsTFR', 'EpochsTFR', (['bl_epochs.info', 'bl_epochs_data', 'cropped_bl_epochs.times', 'freqs'], {'verbose': '(False)'}), '(bl_epochs.info, bl_epochs_data, cropped_bl_epochs.times, freqs,\n verbose=False)\n', (6468, 6551), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((10298, 10324), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (10310, 10324), True, 'import matplotlib.pyplot as plt\n'), ((4873, 4907), 'numpy.median', 'np.median', (['bl_epochs._data'], {'axis': '(0)'}), '(bl_epochs._data, axis=0)\n', (4882, 4907), True, 'import numpy as np\n'), ((5321, 5352), 'numpy.median', 'np.median', (['epochs._data'], {'axis': '(0)'}), '(epochs._data, axis=0)\n', (5330, 5352), True, 'import numpy as np\n'), ((5846, 5880), 'numpy.std', 'np.std', (['bl_evoked_tfr.data'], {'axis': '(2)'}), '(bl_evoked_tfr.data, axis=2)\n', (5852, 5880), True, 'import numpy as np\n'), ((5978, 6009), 'numpy.median', 
'np.median', (['epochs._data'], {'axis': '(0)'}), '(epochs._data, axis=0)\n', (5987, 6009), True, 'import numpy as np\n'), ((9834, 9857), 'numpy.min', 'np.min', (['epochs_tfr.data'], {}), '(epochs_tfr.data)\n', (9840, 9857), True, 'import numpy as np\n'), ((9859, 9882), 'numpy.max', 'np.max', (['epochs_tfr.data'], {}), '(epochs_tfr.data)\n', (9865, 9882), True, 'import numpy as np\n'), ((9904, 9932), 'numpy.min', 'np.min', (['cropped_epochs._data'], {}), '(cropped_epochs._data)\n', (9910, 9932), True, 'import numpy as np\n'), ((9934, 9962), 'numpy.max', 'np.max', (['cropped_epochs._data'], {}), '(cropped_epochs._data)\n', (9940, 9962), True, 'import numpy as np\n'), ((11041, 11080), 'numpy.median', 'np.median', (['cropped_epochs._data'], {'axis': '(0)'}), '(cropped_epochs._data, axis=0)\n', (11050, 11080), True, 'import numpy as np\n'), ((11374, 11397), 'numpy.min', 'np.min', (['evoked_tfr.data'], {}), '(evoked_tfr.data)\n', (11380, 11397), True, 'import numpy as np\n'), ((11399, 11422), 'numpy.max', 'np.max', (['evoked_tfr.data'], {}), '(evoked_tfr.data)\n', (11405, 11422), True, 'import numpy as np\n'), ((11588, 11614), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (11600, 11614), True, 'import matplotlib.pyplot as plt\n'), ((12360, 12461), 'mne.viz.iter_topography', 'iter_topography', (['raw.info'], {'fig_facecolor': '"""white"""', 'axis_facecolor': '"""white"""', 'axis_spinecolor': '"""white"""'}), "(raw.info, fig_facecolor='white', axis_facecolor='white',\n axis_spinecolor='white')\n", (12375, 12461), False, 'from mne.viz import iter_topography\n'), ((13677, 13740), 'matplotlib.colors.SymLogNorm', 'SymLogNorm', ([], {'linthresh': '((vmax - vmin) / 100)', 'vmin': 'vmin', 'vmax': 'vmax'}), '(linthresh=(vmax - vmin) / 100, vmin=vmin, vmax=vmax)\n', (13687, 13740), False, 'from matplotlib.colors import BASE_COLORS, SymLogNorm\n'), ((5723, 5760), 'numpy.median', 'np.median', (['bl_evoked_tfr.data'], {'axis': '(2)'}), '(bl_evoked_tfr.data, axis=2)\n', (5732, 5760), True, 'import numpy as np\n'), ((7714, 7841), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['this_bl_epochs', '[freq]'], {'n_cycles': 'this_n_cycles', 'use_fft': 'use_fft', 'average': '(False)', 'return_itc': '(False)', 'verbose': '(False)'}), '(this_bl_epochs, [freq], n_cycles=this_n_cycles, use_fft=use_fft,\n average=False, return_itc=False, verbose=False)\n', (7724, 7841), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((8098, 8222), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['this_epochs', '[freq]'], {'n_cycles': 'this_n_cycles', 'use_fft': 'use_fft', 'average': '(False)', 'return_itc': '(False)', 'verbose': '(False)'}), '(this_epochs, [freq], n_cycles=this_n_cycles, use_fft=use_fft,\n average=False, return_itc=False, verbose=False)\n', (8108, 8222), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((8427, 8498), 'numpy.concatenate', 'np.concatenate', (['[this_bl_epochs_tfr.data, this_epochs_tfr.data]'], {'axis': '(3)'}), '([this_bl_epochs_tfr.data, this_epochs_tfr.data], axis=3)\n', (8441, 8498), True, 'import numpy as np\n'), ((11108, 11138), 'numpy.median', 'np.median', (['evoked_data'], {'axis': '(1)'}), '(evoked_data, axis=1)\n', (11117, 11138), True, 'import numpy as np\n'), ((11312, 11331), 'numpy.min', 'np.min', (['evoked.data'], {}), '(evoked.data)\n', (11318, 11331), True, 'import numpy as np\n'), ((11333, 11352), 'numpy.max', 'np.max', (['evoked.data'], {}), '(evoked.data)\n', (11339, 11352), True, 
'import numpy as np\n'), ((14339, 14359), 'numpy.where', 'np.where', (['(times == 0)'], {}), '(times == 0)\n', (14347, 14359), True, 'import numpy as np\n'), ((8703, 8731), 'numpy.median', 'np.median', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (8712, 8731), True, 'import numpy as np\n'), ((8834, 8859), 'numpy.std', 'np.std', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (8840, 8859), True, 'import numpy as np\n'), ((9072, 9100), 'numpy.median', 'np.median', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (9081, 9100), True, 'import numpy as np\n'), ((9206, 9231), 'numpy.std', 'np.std', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (9212, 9231), True, 'import numpy as np\n'), ((9327, 9370), 'numpy.median', 'np.median', (['epochs_tfr.data[:, i, j]'], {'axis': '(0)'}), '(epochs_tfr.data[:, i, j], axis=0)\n', (9336, 9370), True, 'import numpy as np\n'), ((9460, 9506), 'numpy.median', 'np.median', (['bl_epochs_tfr.data[:, i, j]'], {'axis': '(0)'}), '(bl_epochs_tfr.data[:, i, j], axis=0)\n', (9469, 9506), True, 'import numpy as np\n'), ((9721, 9747), 'numpy.std', 'np.std', (['this_bl_evoked_tfr'], {}), '(this_bl_evoked_tfr)\n', (9727, 9747), True, 'import numpy as np\n'), ((14569, 14604), 'numpy.linspace', 'np.linspace', (['times[0]', 'times[-1]', '(3)'], {}), '(times[0], times[-1], 3)\n', (14580, 14604), True, 'import numpy as np\n'), ((9646, 9675), 'numpy.median', 'np.median', (['this_bl_evoked_tfr'], {}), '(this_bl_evoked_tfr)\n', (9655, 9675), True, 'import numpy as np\n')]
|
# -------------------------------------------------------------------------------------------------------------------- #
# Import packages
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
from .nurbs_surface import NurbsSurface
# -------------------------------------------------------------------------------------------------------------------- #
# Define the bilinear NURBS surface class
# -------------------------------------------------------------------------------------------------------------------- #
class NurbsSurfaceBilinear:
""" Create a NURBS representation of the bilinear patch defined by corners P00, P01, P10, and P11
Create a NURBS representation of the bilinear patch
S(u,v) = (1-v)*[(1-u)*P00 + u*P01] + v*[(1-u)*P10 + u*P11]
Note that a bilinear patch is a ruled surface with segments (P00, P01) and (P10, P11) as generating curves
S(u,v) = (1-v)*C1(u) + v*C2(u)
C1(u) = (1-u)*P00 + u*P01
C2(u) = (1-u)*P10 + u*P11
Parameters
----------
P00, P01, P10, P11 : ndarrays with shape (ndim,)
Coordinates of the corner points defining the bilinear surface (ndim=3)
References
----------
The NURBS book. Chapter 8.2
<NAME> and <NAME>
Springer, second edition
"""
def __init__(self, P00, P01, P10, P11):
# Declare input variables as instance variables
self.P00 = P00
self.P01 = P01
self.P10 = P10
self.P11 = P11
self.ndim = 3
# Check the number of dimensions of the problem
ndims = [np.shape(P00)[0], np.shape(P01)[0], np.shape(P10)[0], np.shape(P11)[0]]
if any([ndim != 3 for ndim in ndims]):
raise Exception("The input points must be three-dimensional")
# Make the bilinear patch NURBS representation
self.NurbsSurface = None
self.make_nurbs_surface()
def make_nurbs_surface(self):
""" Make a NURBS surface representation of the bilinear surface """
# Define the array of control points
n_dim, n, m = self.ndim, 2, 2
P = np.zeros((n_dim, n, m))
P[:, 0, 0] = self.P00
P[:, 1, 0] = self.P01
P[:, 0, 1] = self.P10
P[:, 1, 1] = self.P11
# Create the NURBS surface
self.NurbsSurface = NurbsSurface(control_points=P)
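# Example usage (sketch; the corner points just need to be 3-vectors):
#   patch = NurbsSurfaceBilinear(np.array([0., 0., 0.]), np.array([1., 0., 0.]),
#                                np.array([0., 1., 0.]), np.array([1., 1., 0.]))
#   patch.NurbsSurface  # bilinear NURBS patch spanning the unit square in the z = 0 plane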
|
[
"numpy.shape",
"numpy.zeros"
] |
[((2206, 2229), 'numpy.zeros', 'np.zeros', (['(n_dim, n, m)'], {}), '((n_dim, n, m))\n', (2214, 2229), True, 'import numpy as np\n'), ((1681, 1694), 'numpy.shape', 'np.shape', (['P00'], {}), '(P00)\n', (1689, 1694), True, 'import numpy as np\n'), ((1699, 1712), 'numpy.shape', 'np.shape', (['P01'], {}), '(P01)\n', (1707, 1712), True, 'import numpy as np\n'), ((1717, 1730), 'numpy.shape', 'np.shape', (['P10'], {}), '(P10)\n', (1725, 1730), True, 'import numpy as np\n'), ((1735, 1748), 'numpy.shape', 'np.shape', (['P11'], {}), '(P11)\n', (1743, 1748), True, 'import numpy as np\n')]
|
"""Calculates the batch_grad derivative."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, List, Tuple
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives.basederivatives import BaseParameterDerivatives
from backpack.extensions.firstorder.base import FirstOrderModuleExtension
from backpack.utils.subsampling import subsample
if TYPE_CHECKING:
from backpack.extensions.firstorder import BatchGrad
class BatchGradBase(FirstOrderModuleExtension):
"""Calculates the batch_grad derivative.
Passes the calls for the parameters to the derivatives class.
Implements functions with method names from params.
If child class wants to overwrite these methods
- for example to support an additional external module -
it can do so using the interface for parameter "param1"::
param1(ext, module, g_inp, g_out, bpQuantities):
return batch_grads
In this case, the method is not overwritten by this class.
"""
def __init__(
self, derivatives: BaseParameterDerivatives, params: List[str]
) -> None:
"""Initializes all methods.
If the param method has already been defined, it is left unchanged.
Args:
derivatives: Derivatives object used to apply parameter Jacobians.
params: List of parameter names.
"""
self._derivatives = derivatives
for param_str in params:
if not hasattr(self, param_str):
setattr(self, param_str, self._make_param_function(param_str))
super().__init__(params=params)
def _make_param_function(
self, param_str: str
) -> Callable[[BatchGrad, Module, Tuple[Tensor], Tuple[Tensor], None], Tensor]:
"""Creates a function that calculates batch_grad w.r.t. param.
Args:
param_str: Parameter name.
Returns:
Function that calculates batch_grad wrt param
"""
def param_function(
ext: BatchGrad,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
bpQuantities: None,
) -> Tensor:
"""Calculates batch_grad with the help of derivatives object.
Args:
ext: extension that is used
module: module that performed forward pass
g_inp: input gradient tensors
g_out: output gradient tensors
bpQuantities: additional quantities for second order
Returns:
Scaled individual gradients
"""
subsampling = ext.get_subsampling()
batch_axis = 0
return self._derivatives.param_mjp(
param_str,
module,
g_inp,
g_out,
subsample(g_out[0], dim=batch_axis, subsampling=subsampling),
sum_batch=False,
subsampling=subsampling,
)
return param_function
|
[
"backpack.utils.subsampling.subsample"
] |
[((2870, 2930), 'backpack.utils.subsampling.subsample', 'subsample', (['g_out[0]'], {'dim': 'batch_axis', 'subsampling': 'subsampling'}), '(g_out[0], dim=batch_axis, subsampling=subsampling)\n', (2879, 2930), False, 'from backpack.utils.subsampling import subsample\n')]
|
# coding:utf-8
#
def two(words):
"""
    :param words: list of words to pair up
    :return: list of frozensets, one for each unordered pair of words
"""
new = []
s = len(words)
for index in range(s):
w = words[index]
for next_index in range(index + 1, s):
next_w = words[next_index]
new.append(frozenset([w, next_w]))
return new
poemfile = open("five_poem.txt").readlines()
feature = []
n = 1
length = len(poemfile)
for poemline in poemfile:
print("finish:%.5f" % (n / length))
poemline = poemline.strip().replace("\n", "")
sentences = poemline.split(".")
temp = []
for sen in sentences:
if len(sen) != 5:
continue
temp.append(sen[:2])
feature.append(temp)
n += 1
size = len(feature)
word_fre = dict()
for fea in feature:
for word in set(fea):
word_fre[word] = word_fre.get(word, 0) + 1 / size
two_fre = dict()
two_feature = []
# build every unordered pair of opening words per poem, then count how often each pair occurs
for fea in feature:
fea = list(set(fea))
two_feature.append(two(fea))
for fea in two_feature:
for word in fea:
two_fre[word] = two_fre.get(word, 0) + 1 / size
# conditional probability of one opening word given the other word of the pair
pro = dict()
for k, v in two_fre.items():
event = list(k)
#
key = event[0]
if key not in pro:
pro[key] = []
pro[key].append(
[event[1], two_fre[k] / word_fre[key]]
)
key = event[1]
if key not in pro:
pro[key] = []
pro[key].append(
[event[0], two_fre[k] / word_fre[key]]
)
# write the conditional probability table to disk
import json
out = open("pro.json", "w")
json.dump(pro, out)
|
[
"json.dump"
] |
[((1466, 1485), 'json.dump', 'json.dump', (['pro', 'out'], {}), '(pro, out)\n', (1475, 1485), False, 'import json\n')]
|
import json
import re
from jinja2.loaders import BaseLoader
from werkzeug.datastructures import ImmutableMultiDict
from jinja2.sandbox import ImmutableSandboxedEnvironment
import decimal
from mirrors.common.logger import logger
class DjangoLoader(BaseLoader):
def __init__(self):
pass
def get_source(self, environment, _):
return _, None, None
variable_repr_re = re.compile(r'\{\{(.*?)\}\}')
def find_used_variable(content):
matched = variable_repr_re.findall(content)
return [each.split('.')[0] for each in matched]
def str_to_obj(str):
return json.loads(str)
default_jinja_context = {
"len": len,
'float': float,
'decimal': decimal.Decimal,
'str_to_obj': str_to_obj,
}
def jinja_render(content, context):
if not content:
content = {}
from jinja2.runtime import Undefined
env = ImmutableSandboxedEnvironment(
loader=DjangoLoader(),
cache_size=0,
undefined=Undefined,
)
context.update(default_jinja_context)
try:
return env.get_template(content).render(context)
except Exception as e:
logger.debug('----- render content failed -----')
logger.debug(content)
logger.debug('--------------- end -------------')
import traceback
traceback.print_exc()
raise
def jinja_render_many(arr, context):
_SPLITER = '^#^#^'
content = _SPLITER.join(arr)
ret = jinja_render(content, context)
return tuple(ret.split(_SPLITER))
def gql_render(str, context=None):
return str
def parse_gql(gql, model, custom_query_model=None):
from mirrors.libs.advance_search.parser import Parser
from mirrors.libs.advance_search.advsearch_visitor import AdvSearchVisitor, AdvSearchRewriteVisitor
ast = Parser.parse(gql)
    # Rewrite the AST first to guard against malformed gql sent by the frontend
visitor = AdvSearchRewriteVisitor()
ast = ast.accept(visitor)
visitor = AdvSearchVisitor(model, custom_query_model)
node = ast.accept(visitor)
return node
def extract_args_or_gql(request_args, key):
if key in request_args:
return request_args[key]
gql = request_args.get('gql')
if not gql:
return None
gql += '&'
re_str = r'\b{0}(__eq|__s)?=(-*\w+)\b'.format(key)
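    # Match "<key>=<value>", "<key>__eq=<value>" or "<key>__s=<value>"; group 2 captures the value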
match = re.search(re_str, gql)
if match:
return match.group(2)
else:
return None
class TrackerDict(ImmutableMultiDict):
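    """ImmutableMultiDict that records every key accessed via __getitem__, get or getlist."""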
def __init__(self, *args, **kwargs):
self.keys = set()
super(TrackerDict, self).__init__(*args, **kwargs)
def tracker(self, func):
def wrapper(*args, **kwargs):
if len(args):
key = args[0]
else:
key = kwargs.get('key')
self.keys.add(key)
return func(*args, **kwargs)
return wrapper
def __getitem__(self, item):
self.keys.add(item)
return super(TrackerDict, self).__getitem__(item)
def __getattribute__(self, item):
attr = super(TrackerDict, self).__getattribute__(item)
if item in ('get', 'getlist'):
attr = self.tracker(attr)
return attr
|
[
"traceback.print_exc",
"json.loads",
"mirrors.libs.advance_search.advsearch_visitor.AdvSearchVisitor",
"mirrors.libs.advance_search.parser.Parser.parse",
"mirrors.common.logger.logger.debug",
"mirrors.libs.advance_search.advsearch_visitor.AdvSearchRewriteVisitor",
"re.search",
"re.compile"
] |
[((394, 425), 're.compile', 're.compile', (['"""\\\\{\\\\{(.*?)\\\\}\\\\}"""'], {}), "('\\\\{\\\\{(.*?)\\\\}\\\\}')\n", (404, 425), False, 'import re\n'), ((592, 607), 'json.loads', 'json.loads', (['str'], {}), '(str)\n', (602, 607), False, 'import json\n'), ((1787, 1804), 'mirrors.libs.advance_search.parser.Parser.parse', 'Parser.parse', (['gql'], {}), '(gql)\n', (1799, 1804), False, 'from mirrors.libs.advance_search.parser import Parser\n'), ((1842, 1867), 'mirrors.libs.advance_search.advsearch_visitor.AdvSearchRewriteVisitor', 'AdvSearchRewriteVisitor', ([], {}), '()\n', (1865, 1867), False, 'from mirrors.libs.advance_search.advsearch_visitor import AdvSearchVisitor, AdvSearchRewriteVisitor\n'), ((1917, 1960), 'mirrors.libs.advance_search.advsearch_visitor.AdvSearchVisitor', 'AdvSearchVisitor', (['model', 'custom_query_model'], {}), '(model, custom_query_model)\n', (1933, 1960), False, 'from mirrors.libs.advance_search.advsearch_visitor import AdvSearchVisitor, AdvSearchRewriteVisitor\n'), ((2266, 2288), 're.search', 're.search', (['re_str', 'gql'], {}), '(re_str, gql)\n', (2275, 2288), False, 'import re\n'), ((1128, 1177), 'mirrors.common.logger.logger.debug', 'logger.debug', (['"""----- render content failed -----"""'], {}), "('----- render content failed -----')\n", (1140, 1177), False, 'from mirrors.common.logger import logger\n'), ((1186, 1207), 'mirrors.common.logger.logger.debug', 'logger.debug', (['content'], {}), '(content)\n', (1198, 1207), False, 'from mirrors.common.logger import logger\n'), ((1216, 1265), 'mirrors.common.logger.logger.debug', 'logger.debug', (['"""--------------- end -------------"""'], {}), "('--------------- end -------------')\n", (1228, 1265), False, 'from mirrors.common.logger import logger\n'), ((1299, 1320), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1318, 1320), False, 'import traceback\n')]
|
# <NAME>
import os
import numpy
from get_dataset import get_dataset
from get_model import get_model, save_model
from keras.callbacks import ModelCheckpoint, TensorBoard
epochs = 15
batch_size = 6
def train_model(model, X, X_test, Y, Y_test):
checkpoints = []
if not os.path.exists('Data/Checkpoints/'):
os.makedirs('Data/Checkpoints/')
checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))
'''
# Creates live data:
# For better yield. The duration of the training is extended.
from keras.preprocessing.image import ImageDataGenerator
generated_data = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) # For include left hand data add: 'horizontal_flip = True'
generated_data.fit(X)
model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size), steps_per_epoch=X.shape[0]/batch_size, epochs=epochs, validation_data=(X_test, Y_test), callbacks=checkpoints)
'''
model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)
return model
def main():
X, X_test, Y, Y_test = get_dataset()
model = get_model()
model = train_model(model, X, X_test, Y, Y_test)
save_model(model)
return model
if __name__ == '__main__':
main()
|
[
"get_model.get_model",
"os.makedirs",
"keras.callbacks.ModelCheckpoint",
"os.path.exists",
"get_model.save_model",
"keras.callbacks.TensorBoard",
"get_dataset.get_dataset"
] |
[((1485, 1498), 'get_dataset.get_dataset', 'get_dataset', ([], {}), '()\n', (1496, 1498), False, 'from get_dataset import get_dataset\n'), ((1511, 1522), 'get_model.get_model', 'get_model', ([], {}), '()\n', (1520, 1522), False, 'from get_model import get_model, save_model\n'), ((1580, 1597), 'get_model.save_model', 'save_model', (['model'], {}), '(model)\n', (1590, 1597), False, 'from get_model import get_model, save_model\n'), ((276, 311), 'os.path.exists', 'os.path.exists', (['"""Data/Checkpoints/"""'], {}), "('Data/Checkpoints/')\n", (290, 311), False, 'import os\n'), ((321, 353), 'os.makedirs', 'os.makedirs', (['"""Data/Checkpoints/"""'], {}), "('Data/Checkpoints/')\n", (332, 353), False, 'import os\n'), ((378, 536), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""Data/Checkpoints/best_weights.h5"""'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'save_weights_only': '(True)', 'mode': '"""auto"""', 'period': '(1)'}), "('Data/Checkpoints/best_weights.h5', monitor='val_loss',\n verbose=0, save_best_only=True, save_weights_only=True, mode='auto',\n period=1)\n", (393, 536), False, 'from keras.callbacks import ModelCheckpoint, TensorBoard\n'), ((553, 736), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""Data/Checkpoints/./logs"""', 'histogram_freq': '(1)', 'write_graph': '(True)', 'write_images': '(True)', 'embeddings_freq': '(0)', 'embeddings_layer_names': 'None', 'embeddings_metadata': 'None'}), "(log_dir='Data/Checkpoints/./logs', histogram_freq=1,\n write_graph=True, write_images=True, embeddings_freq=0,\n embeddings_layer_names=None, embeddings_metadata=None)\n", (564, 736), False, 'from keras.callbacks import ModelCheckpoint, TensorBoard\n')]
|
from hypothesis import HealthCheck
from hypothesis import given, settings
from hypothesis.extra import numpy as hnp
from pytiff import *
import hypothesis.strategies as st
import numpy as np
import pytest
import subprocess
import tifffile
from skimage.data import coffee
def test_write_rgb(tmpdir_factory):
img = coffee()
filename = str(tmpdir_factory.mktemp("write").join("rgb_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(img, method="tile")
with Tiff(filename) as handle:
data = handle[:]
assert np.all(img == data[:, :, :3])
with Tiff(filename, "w") as handle:
handle.write(img, method="scanline")
with Tiff(filename) as handle:
data = handle[:]
assert np.all(img == data[:, :, :3])
# scanline integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline_set_rows_per_strip(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
rows_per_strip = 1
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline", rows_per_strip=rows_per_strip)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
assert rows_per_strip == handle[0].tags["rows_per_strip"].value
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=20, max_side=20)))
def test_write_int_slices_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img_scanline.tif"))
with Tiff(filename, "w") as handle:
handle.write(data[:, :], method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data[:,:], img)
# tile integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_tile_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
def test_write_float_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("float_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
def test_write_float_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("float_tile_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_length=16, tile_width=16)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_append_int_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("append_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with Tiff(filename, "a") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with Tiff(filename, "r") as handle:
assert handle.number_of_pages == 2
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img[0])
np.testing.assert_array_equal(data, img[1])
def test_write_chunk(tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("chunk_img.tif"))
filename = "test_chunk.tif"
data1 = np.ones((64,64), dtype=np.uint8) * 1
data2 = np.ones((64,64), dtype=np.uint8) * 2
data3 = np.ones((64,64), dtype=np.uint8) * 3
data4 = np.ones((64,64), dtype=np.uint8) * 4
with Tiff(filename, "w") as handle:
chunks = [data1, data2, data3, data4]
handle.new_page((300, 300), dtype=np.uint8, tile_length=16, tile_width=16)
row = 0
col = 0
max_row_end = 0
positions = []
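        # Write chunks left to right, wrapping to the next row band once the 300-pixel page width is filled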
for c in chunks:
shape = c.shape
row_end, col_end = row + shape[0], col + shape[1]
max_row_end = max(max_row_end, row_end)
handle[row:row_end, col:col_end] = c
# save for reading chunks
positions.append([row, row_end, col, col_end])
if col_end >= handle.shape[1]:
col = 0
row = max_row_end
else:
col = col_end
handle.save_page()
with Tiff(filename) as handle:
for pos, chunk in zip(positions, chunks):
row, row_end, col, col_end = pos
data = handle[row:row_end, col:col_end]
assert np.all(data == chunk)
with Tiff(filename) as handle:
with pytest.raises(ValueError):
handle.new_page((50, 50), np.dtype("uint8"))
handle[:, :] = np.random.rand(50, 50)
handle.save_page()
def test_write_chunk_multiple_pages(tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("multi_page_chunk_img.tif"))
data1 = np.ones((64,64), dtype=np.uint8) * 1
data2 = np.ones((64,64), dtype=np.uint8) * 2
data3 = np.ones((64,64), dtype=np.uint8) * 3
data4 = np.ones((64,64), dtype=np.uint8) * 4
with Tiff(filename, "w")as handle:
chunks = [data1, data2, data3, data4]
for c in chunks:
shape = c.shape
handle.new_page(shape, dtype=np.uint8, tile_length=16, tile_width=16)
handle[:] = c
with Tiff(filename) as handle:
for page, chunk in enumerate(chunks):
handle.set_page(page)
data = handle[:]
assert data.shape == chunk.shape
assert np.all(data == chunk)
|
[
"numpy.testing.assert_array_equal",
"hypothesis.extra.numpy.integer_dtypes",
"tifffile.TiffFile",
"numpy.dtype",
"numpy.ones",
"hypothesis.extra.numpy.unsigned_integer_dtypes",
"hypothesis.strategies.floats",
"hypothesis.extra.numpy.floating_dtypes",
"hypothesis.settings",
"pytest.raises",
"numpy.random.rand",
"hypothesis.extra.numpy.array_shapes",
"skimage.data.coffee",
"numpy.all"
] |
[((803, 833), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (811, 833), False, 'from hypothesis import given, settings\n'), ((1373, 1403), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (1381, 1403), False, 'from hypothesis import given, settings\n'), ((2088, 2118), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (2096, 2118), False, 'from hypothesis import given, settings\n'), ((2707, 2737), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (2715, 2737), False, 'from hypothesis import given, settings\n'), ((3305, 3335), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (3313, 3335), False, 'from hypothesis import given, settings\n'), ((3850, 3880), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (3858, 3880), False, 'from hypothesis import given, settings\n'), ((4423, 4453), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (4431, 4453), False, 'from hypothesis import given, settings\n'), ((318, 326), 'skimage.data.coffee', 'coffee', ([], {}), '()\n', (324, 326), False, 'from skimage.data import coffee\n'), ((554, 583), 'numpy.all', 'np.all', (['(img == data[:, :, :3])'], {}), '(img == data[:, :, :3])\n', (560, 583), True, 'import numpy as np\n'), ((745, 774), 'numpy.all', 'np.all', (['(img == data[:, :, :3])'], {}), '(img == data[:, :, :3])\n', (751, 774), True, 'import numpy as np\n'), ((1252, 1279), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (1269, 1279), False, 'import tifffile\n'), ((1330, 1370), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (1359, 1370), True, 'import numpy as np\n'), ((1895, 1922), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (1912, 1922), False, 'import tifffile\n'), ((1973, 2013), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (2002, 2013), True, 'import numpy as np\n'), ((2559, 2586), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (2576, 2586), False, 'import tifffile\n'), ((2637, 2683), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data[:, :]', 'img'], {}), '(data[:, :], img)\n', (2666, 2683), True, 'import numpy as np\n'), ((3184, 3211), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (3201, 3211), False, 'import tifffile\n'), ((3262, 3302), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (3291, 3302), True, 'import numpy as np\n'), ((3729, 3756), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (3746, 3756), False, 'import tifffile\n'), ((3807, 3847), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (3836, 3847), True, 'import numpy as np\n'), ((4302, 4329), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (4319, 4329), False, 'import tifffile\n'), ((4380, 4420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (4409, 4420), True, 'import numpy as np\n'), ((5097, 5124), 'tifffile.TiffFile', 
'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (5114, 5124), False, 'import tifffile\n'), ((5175, 5218), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img[0]'], {}), '(data, img[0])\n', (5204, 5218), True, 'import numpy as np\n'), ((5227, 5270), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img[1]'], {}), '(data, img[1])\n', (5256, 5270), True, 'import numpy as np\n'), ((5427, 5460), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5434, 5460), True, 'import numpy as np\n'), ((5476, 5509), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5483, 5509), True, 'import numpy as np\n'), ((5525, 5558), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5532, 5558), True, 'import numpy as np\n'), ((5574, 5607), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5581, 5607), True, 'import numpy as np\n'), ((6937, 6970), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (6944, 6970), True, 'import numpy as np\n'), ((6986, 7019), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (6993, 7019), True, 'import numpy as np\n'), ((7035, 7068), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (7042, 7068), True, 'import numpy as np\n'), ((7084, 7117), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (7091, 7117), True, 'import numpy as np\n'), ((6551, 6572), 'numpy.all', 'np.all', (['(data == chunk)'], {}), '(data == chunk)\n', (6557, 6572), True, 'import numpy as np\n'), ((6622, 6647), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6635, 6647), False, 'import pytest\n'), ((6733, 6755), 'numpy.random.rand', 'np.random.rand', (['(50)', '(50)'], {}), '(50, 50)\n', (6747, 6755), True, 'import numpy as np\n'), ((7577, 7598), 'numpy.all', 'np.all', (['(data == chunk)'], {}), '(data == chunk)\n', (7583, 7598), True, 'import numpy as np\n'), ((965, 1031), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (981, 1031), True, 'from hypothesis.extra import numpy as hnp\n'), ((1535, 1601), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (1551, 1601), True, 'from hypothesis.extra import numpy as hnp\n'), ((2250, 2316), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(20)', 'max_side': '(20)'}), '(min_dims=2, max_dims=2, min_side=20, max_side=20)\n', (2266, 2316), True, 'from hypothesis.extra import numpy as hnp\n'), ((2869, 2935), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (2885, 2935), True, 'from hypothesis.extra import numpy as hnp\n'), ((3365, 3400), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (3384, 3400), True, 'from hypothesis.extra import numpy as 
hnp\n'), ((3412, 3478), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (3428, 3478), True, 'from hypothesis.extra import numpy as hnp\n'), ((3489, 3504), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (3498, 3504), True, 'import hypothesis.strategies as st\n'), ((3910, 3945), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (3929, 3945), True, 'from hypothesis.extra import numpy as hnp\n'), ((3957, 4023), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (3973, 4023), True, 'from hypothesis.extra import numpy as hnp\n'), ((4034, 4049), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (4043, 4049), True, 'import hypothesis.strategies as st\n'), ((4585, 4651), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (4601, 4651), True, 'from hypothesis.extra import numpy as hnp\n'), ((6687, 6704), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (6695, 6704), True, 'import numpy as np\n'), ((873, 907), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (891, 907), True, 'from hypothesis.extra import numpy as hnp\n'), ((909, 952), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (936, 952), True, 'from hypothesis.extra import numpy as hnp\n'), ((1443, 1477), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (1461, 1477), True, 'from hypothesis.extra import numpy as hnp\n'), ((1479, 1522), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (1506, 1522), True, 'from hypothesis.extra import numpy as hnp\n'), ((2158, 2192), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2176, 2192), True, 'from hypothesis.extra import numpy as hnp\n'), ((2194, 2237), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2221, 2237), True, 'from hypothesis.extra import numpy as hnp\n'), ((2777, 2811), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2795, 2811), True, 'from hypothesis.extra import numpy as hnp\n'), ((2813, 2856), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2840, 2856), True, 'from hypothesis.extra import numpy as hnp\n'), ((4493, 4527), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (4511, 4527), True, 'from hypothesis.extra import numpy as hnp\n'), ((4529, 4572), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (4556, 4572), True, 'from 
hypothesis.extra import numpy as hnp\n')]
|
from app.models.report import FacilityReportModel
from tests.v2.views import TCBase
class TestFacilityReport(TCBase):
"""
    Tests facility fault reports.
"""
def __init__(self, *args, **kwargs):
super(TestFacilityReport, self).__init__(*args, **kwargs)
self.method = self.client.post
self.target_uri = '/student/report/facility'
self.content = 'hello'
self.room = 311
def setUp(self):
super(TestFacilityReport, self).setUp()
# ---
self._request = lambda *, token=self.student_access_token, content=self.content, room=self.room: self.request(
self.method,
self.target_uri,
token,
json={
'content': content,
'room': room
}
)
def testReportSuccess(self):
        # (1) submit a facility fault report
resp = self._request()
# (2) status code 201
self.assertEqual(resp.status_code, 201)
# (3) response data
data = resp.json
self.assertIn('id', data)
id = data['id']
self.assertIsInstance(id, str)
self.assertEqual(len(id), 24)
        # (4) verify the record was saved in the database
self.assertTrue(FacilityReportModel.objects(id=id, content=self.content, room=self.room))
def testForbidden(self):
self.assertEqual(self._request(token=self.admin_access_token).status_code, 403)
|
[
"app.models.report.FacilityReportModel.objects"
] |
[((1209, 1281), 'app.models.report.FacilityReportModel.objects', 'FacilityReportModel.objects', ([], {'id': 'id', 'content': 'self.content', 'room': 'self.room'}), '(id=id, content=self.content, room=self.room)\n', (1236, 1281), False, 'from app.models.report import FacilityReportModel\n')]
|
from django.conf.urls import url
from .views import index, food_detail, post_food_comment
urlpatterns = [
url(r'^$', index),
url(r'^food/(?P<food_slug>[-\w]+)$', food_detail),
url(r'^food/(?P<food_slug>[-\w]+)/add_comment$', post_food_comment)
]
|
[
"django.conf.urls.url"
] |
[((112, 128), 'django.conf.urls.url', 'url', (['"""^$"""', 'index'], {}), "('^$', index)\n", (115, 128), False, 'from django.conf.urls import url\n'), ((135, 184), 'django.conf.urls.url', 'url', (['"""^food/(?P<food_slug>[-\\\\w]+)$"""', 'food_detail'], {}), "('^food/(?P<food_slug>[-\\\\w]+)$', food_detail)\n", (138, 184), False, 'from django.conf.urls import url\n'), ((190, 257), 'django.conf.urls.url', 'url', (['"""^food/(?P<food_slug>[-\\\\w]+)/add_comment$"""', 'post_food_comment'], {}), "('^food/(?P<food_slug>[-\\\\w]+)/add_comment$', post_food_comment)\n", (193, 257), False, 'from django.conf.urls import url\n')]
|
"""
Sandbox and record suppress all records with a concept_id or concept_code relating to Geo Location information.
Original Issue: DC-1385
suppress all records associated with GeoLocation identifier concepts in the PPI vocabulary
The concept_ids to suppress can be determined from the vocabulary with the following regular expressions.
REGEXP_CONTAINS(concept_code, r'(SitePairing)|(City)|(ArizonaSpecific)|(Michigan)|(_Country)| \
(ExtraConsent_[A-Za-z]+((Care)|(Registered)))')AND concept_class_id = 'Question')
and also covers all the mapped standard concepts for non standard concepts that the regex filters.
"""
# Python Imports
import logging
# Third Party Imports
from google.cloud.exceptions import GoogleCloudError
# Project Imports
from common import OBSERVATION
from common import JINJA_ENV
import constants.cdr_cleaner.clean_cdr as cdr_consts
from cdr_cleaner.cleaning_rules.deid.concept_suppression import \
AbstractBqLookupTableConceptSuppression
LOGGER = logging.getLogger(__name__)
ISSUE_NUMBERS = ['DC1385']
GEO_LOCATION_SUPPRESSION_CONCEPT_TABLE = '_geolocation_identifier_concepts'
GEO_LOCATION_CONCEPT_SUPPRESSION_LOOKUP_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_dataset_id}}.{{lookup_table}}` as(
WITH
geolocation_concept_ids AS (
SELECT
DISTINCT *
FROM
`{{project_id}}.{{dataset_id}}.concept`
WHERE
REGEXP_CONTAINS(concept_code, r'(SitePairing)|(City)|(ArizonaSpecific)|(Michigan)|(_Country)|(ExtraConsent_[A-Za-z]+((Care)|(Registered)))')AND concept_class_id = 'Question')
SELECT
DISTINCT *
FROM
geolocation_concept_ids
UNION DISTINCT
SELECT
DISTINCT *
FROM
`{{project_id}}.{{dataset_id}}.concept`
WHERE
concept_id IN(
SELECT
cr.concept_id_2
FROM
geolocation_concept_ids AS c
JOIN
`{{project_id}}.{{dataset_id}}.concept_relationship` AS cr
ON
c.concept_id = cr.concept_id_1
WHERE
cr.relationship_id = 'Maps to')
)
""")
class GeoLocationConceptSuppression(AbstractBqLookupTableConceptSuppression):
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
"""
Initialize the class with proper info.
Set the issue numbers, description and affected datasets. As other
tickets may affect this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = (
'Sandbox and record suppress all records with a concept_id or concept_code '
'relating to Geo Location information. ')
super().__init__(issue_numbers=ISSUE_NUMBERS,
description=desc,
affected_datasets=[cdr_consts.CONTROLLED_TIER_DEID],
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
affected_tables=[OBSERVATION],
concept_suppression_lookup_table=
GEO_LOCATION_SUPPRESSION_CONCEPT_TABLE)
def create_suppression_lookup_table(self, client):
"""
:param client: Bigquery client
:return: None
raises google.cloud.exceptions.GoogleCloudError if a QueryJob fails
"""
concept_suppression_lookup_query = GEO_LOCATION_CONCEPT_SUPPRESSION_LOOKUP_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
sandbox_dataset_id=self.sandbox_dataset_id,
lookup_table=self.concept_suppression_lookup_table)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def setup_validation(self, client, *args, **keyword_args):
pass
def validate_rule(self, client, *args, **keyword_args):
pass
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
from utils import pipeline_logging
import cdr_cleaner.clean_cdr_engine as clean_engine
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
ARGS = parser.default_parse_args()
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(GeoLocationConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(GeoLocationConceptSuppression,)])
|
[
"cdr_cleaner.clean_cdr_engine.get_query_list",
"google.cloud.exceptions.GoogleCloudError",
"cdr_cleaner.clean_cdr_engine.clean_dataset",
"cdr_cleaner.args_parser.default_parse_args",
"cdr_cleaner.clean_cdr_engine.add_console_logging",
"utils.pipeline_logging.configure",
"logging.getLogger",
"common.JINJA_ENV.from_string"
] |
[((996, 1023), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1013, 1023), False, 'import logging\n'), ((1177, 2109), 'common.JINJA_ENV.from_string', 'JINJA_ENV.from_string', (['"""\nCREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_dataset_id}}.{{lookup_table}}` as(\n WITH\n geolocation_concept_ids AS (\n SELECT\n DISTINCT *\n FROM\n `{{project_id}}.{{dataset_id}}.concept`\n WHERE\n REGEXP_CONTAINS(concept_code, r\'(SitePairing)|(City)|(ArizonaSpecific)|(Michigan)|(_Country)|(ExtraConsent_[A-Za-z]+((Care)|(Registered)))\')AND concept_class_id = \'Question\')\n SELECT\n DISTINCT *\n FROM\n geolocation_concept_ids\n UNION DISTINCT\n SELECT\n DISTINCT *\n FROM\n `{{project_id}}.{{dataset_id}}.concept`\n WHERE\n concept_id IN(\n SELECT\n cr.concept_id_2\n FROM\n geolocation_concept_ids AS c\n JOIN\n `{{project_id}}.{{dataset_id}}.concept_relationship` AS cr\n ON\n c.concept_id = cr.concept_id_1\n WHERE\n cr.relationship_id = \'Maps to\')\n )\n """'], {}), '(\n """\nCREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_dataset_id}}.{{lookup_table}}` as(\n WITH\n geolocation_concept_ids AS (\n SELECT\n DISTINCT *\n FROM\n `{{project_id}}.{{dataset_id}}.concept`\n WHERE\n REGEXP_CONTAINS(concept_code, r\'(SitePairing)|(City)|(ArizonaSpecific)|(Michigan)|(_Country)|(ExtraConsent_[A-Za-z]+((Care)|(Registered)))\')AND concept_class_id = \'Question\')\n SELECT\n DISTINCT *\n FROM\n geolocation_concept_ids\n UNION DISTINCT\n SELECT\n DISTINCT *\n FROM\n `{{project_id}}.{{dataset_id}}.concept`\n WHERE\n concept_id IN(\n SELECT\n cr.concept_id_2\n FROM\n geolocation_concept_ids AS c\n JOIN\n `{{project_id}}.{{dataset_id}}.concept_relationship` AS cr\n ON\n c.concept_id = cr.concept_id_1\n WHERE\n cr.relationship_id = \'Maps to\')\n )\n """\n )\n', (1198, 2109), False, 'from common import JINJA_ENV\n'), ((4387, 4460), 'utils.pipeline_logging.configure', 'pipeline_logging.configure', ([], {'level': 'logging.DEBUG', 'add_console_handler': '(True)'}), '(level=logging.DEBUG, add_console_handler=True)\n', (4413, 4460), False, 'from utils import pipeline_logging\n'), ((4473, 4500), 'cdr_cleaner.args_parser.default_parse_args', 'parser.default_parse_args', ([], {}), '()\n', (4498, 4500), True, 'import cdr_cleaner.args_parser as parser\n'), ((4536, 4570), 'cdr_cleaner.clean_cdr_engine.add_console_logging', 'clean_engine.add_console_logging', ([], {}), '()\n', (4568, 4570), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((4592, 4719), 'cdr_cleaner.clean_cdr_engine.get_query_list', 'clean_engine.get_query_list', (['ARGS.project_id', 'ARGS.dataset_id', 'ARGS.sandbox_dataset_id', '[(GeoLocationConceptSuppression,)]'], {}), '(ARGS.project_id, ARGS.dataset_id, ARGS.\n sandbox_dataset_id, [(GeoLocationConceptSuppression,)])\n', (4619, 4719), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((4822, 4872), 'cdr_cleaner.clean_cdr_engine.add_console_logging', 'clean_engine.add_console_logging', (['ARGS.console_log'], {}), '(ARGS.console_log)\n', (4854, 4872), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((4881, 5007), 'cdr_cleaner.clean_cdr_engine.clean_dataset', 'clean_engine.clean_dataset', (['ARGS.project_id', 'ARGS.dataset_id', 'ARGS.sandbox_dataset_id', '[(GeoLocationConceptSuppression,)]'], {}), '(ARGS.project_id, ARGS.dataset_id, ARGS.\n sandbox_dataset_id, [(GeoLocationConceptSuppression,)])\n', (4907, 5007), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((3973, 4044), 'google.cloud.exceptions.GoogleCloudError', 
'GoogleCloudError', (['f"""Error running job {result.job_id}: {result.errors}"""'], {}), "(f'Error running job {result.job_id}: {result.errors}')\n", (3989, 4044), False, 'from google.cloud.exceptions import GoogleCloudError\n')]
|
#!/usr/bin/env python3
import codecs
from collections import defaultdict
import errno
import json
import os
def tree():
"""
http://recursive-labs.com/blog/2012/05/31/one-line-python-tree-explained/
"""
return defaultdict(tree)
def index_tags():
"""
Iterate through all locally saved JSON files
and generate the Stack Exchange Cross Tag index
"""
# Get the script directory
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Let's make our index a defaultdict with autovivification
index = tree()
# Iterate through all files in the data directory
for file in os.listdir(os.path.join(SCRIPT_DIR, '../data')):
# Load the JSON file containing the tags for a site
with codecs.open(
os.path.join(SCRIPT_DIR, '../data/' + file),
'r',
encoding='utf-8'
) as input_file:
tags = json.load(input_file)
# The site ID is the filename minus the (.json) at the end
site_id = file[:-5]
# Iterate through all tags and add them to the index
for tag in tags['items']:
index[tag['name']][site_id] = tag['count'] # Autovivification ftw!
# Create the index directory
try:
os.makedirs(os.path.join(SCRIPT_DIR, '../index'))
# If the directory already exists, ignore the error, otherwise report it
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Save the tag index to a local JSON file
with codecs.open(
os.path.join(SCRIPT_DIR, '../index/index_tags.json'),
'w',
encoding='utf-8'
) as output_file:
json.dump(index, output_file, ensure_ascii=False)
# Some status information for the console
print('Successfully created the tag index.')
# If the script is called directly, execute the index_tags() function
if __name__ == '__main__':
index_tags()
|
[
"json.dump",
"json.load",
"os.path.realpath",
"collections.defaultdict",
"os.path.join"
] |
[((228, 245), 'collections.defaultdict', 'defaultdict', (['tree'], {}), '(tree)\n', (239, 245), False, 'from collections import defaultdict\n'), ((447, 473), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (463, 473), False, 'import os\n'), ((640, 675), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""../data"""'], {}), "(SCRIPT_DIR, '../data')\n", (652, 675), False, 'import os\n'), ((1678, 1727), 'json.dump', 'json.dump', (['index', 'output_file'], {'ensure_ascii': '(False)'}), '(index, output_file, ensure_ascii=False)\n', (1687, 1727), False, 'import json\n'), ((912, 933), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (921, 933), False, 'import json\n'), ((1269, 1305), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""../index"""'], {}), "(SCRIPT_DIR, '../index')\n", (1281, 1305), False, 'import os\n'), ((1556, 1608), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""../index/index_tags.json"""'], {}), "(SCRIPT_DIR, '../index/index_tags.json')\n", (1568, 1608), False, 'import os\n'), ((777, 820), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', "('../data/' + file)"], {}), "(SCRIPT_DIR, '../data/' + file)\n", (789, 820), False, 'import os\n')]
|
from django import template
register = template.Library()
@register.filter
def active_since(region_offers, offer_template):
return region_offers.filter(template=offer_template).first().created_date
|
[
"django.template.Library"
] |
[((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')]
|
import numpy as np
def rd(c1, c2):
return np.sqrt((c1[0]-c2[0])**2+(c1[1]-c2[1])**2+(c1[2]-c2[2])**2)
#rbf as global support spline type
#Gaussian Spline
def rbf(r):
return np.exp(-r**2)
#Spline polynomial
def rbf1(r,deg):
return r**deg
# Global
def rbf2(r):
return np.exp(-r**2)
# %% codecell
|
[
"numpy.exp",
"numpy.sqrt"
] |
[((46, 121), 'numpy.sqrt', 'np.sqrt', (['((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)'], {}), '((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)\n', (53, 121), True, 'import numpy as np\n'), ((181, 196), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (187, 196), True, 'import numpy as np\n'), ((282, 297), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (288, 297), True, 'import numpy as np\n')]
|
import wx
import matplotlib.pyplot as plt
import os
import sys
import subprocess
from PIL import Image
class App(wx.Frame):
def __init__(self, parent, title):
super(App, self).__init__(parent, title = title,size = (640,300))
panel = wx.Panel(self)
sizer = wx.GridBagSizer(5, 4)
#description
text = wx.StaticText(panel, label="Folder path")
sizer.Add(text, pos=(0, 0), flag=wx.TOP|wx.LEFT, border=10)
#input field
self.path = wx.TextCtrl(panel)
sizer.Add(self.path, pos=(1, 0), span=(1, 4), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=10)
self.path.Bind(wx.EVT_TEXT,self.OnKeyTyped)
#hint
text2 = wx.StaticText(panel, style = wx.TE_MULTILINE, label=" Hint: hold Option after right-click on folder to show option to copy its path.")
sizer.Add(text2, pos=(2, 0), span=(1, 3), flag=wx.BOTTOM|wx.TOP|wx.LEFT, border=5)
#button open folder
button_open = wx.Button(panel, label="Choose folder", size=(140, 24))
button_open.Bind(wx.EVT_BUTTON, self.onDir)
sizer.Add(button_open, pos=(1, 4), flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)
#button execute
button_execute = wx.Button(panel, label="Remove Alpha", size=(140, 24))
self.Bind(wx.EVT_BUTTON, self.OnClickedExecute, button_execute)
sizer.Add(button_execute, pos=(2, 4), flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM, border=10)
#output
line = wx.TextCtrl(panel, wx.ID_ANY,style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.TE_RICH2)
sizer.Add(line, pos=(3, 0), span=(1, 5), flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
sys.stdout = line
sizer.AddGrowableCol(1)
sizer.AddGrowableCol(3)
sizer.AddGrowableRow(3)
panel.SetSizer(sizer)
def OnKeyTyped(self, event):
self.path = event.GetString()
def OnClickedExecute(self, event):
button_execute = event.GetEventObject().GetLabel()
program(self.path)
def OnClickedOpen(self, event):
button_open = event.GetEventObject().GetLabel()
onDir(self.path)
#open folder modal
def onDir(self, event):
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if dlg.ShowModal() == wx.ID_OK:
print ("Selected folder is %s" % dlg.GetPath())
self.path = dlg.GetPath()
def remove_alpha(image):
color=(255, 255, 255)
    image.load() # needed for split()
background = Image.new('RGB', image.size, color)
    background.paste(image, mask=image.split()[3]) # 3 is the alpha channel
return background
def program(path):
for subdir, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".png"):
pic_path = path+filename
pic = Image.open(os.path.join(subdir, filename))
if pic.mode in ('RGBA', 'LA') or (pic.mode == 'P' and 'transparency' in pic.info):
pic=remove_alpha(pic)
pic.save(os.path.join(subdir, filename))
print(os.path.join(subdir, filename))
proc = subprocess.Popen("ping %s", shell=True, stdout=subprocess.PIPE)
line = proc.stdout.readline()
app = wx.App(redirect=True)
ex = App(None, 'PNG Alpha remover')
ex.Show()
app.MainLoop()
|
[
"PIL.Image.new",
"subprocess.Popen",
"os.path.join",
"os.walk",
"wx.Panel",
"wx.GridBagSizer",
"wx.StaticText",
"wx.Button",
"wx.TextCtrl",
"wx.App",
"wx.DirDialog"
] |
[((3317, 3338), 'wx.App', 'wx.App', ([], {'redirect': '(True)'}), '(redirect=True)\n', (3323, 3338), False, 'import wx\n'), ((2510, 2545), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'image.size', 'color'], {}), "('RGB', image.size, color)\n", (2519, 2545), False, 'from PIL import Image\n'), ((2692, 2705), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2699, 2705), False, 'import os\n'), ((254, 268), 'wx.Panel', 'wx.Panel', (['self'], {}), '(self)\n', (262, 268), False, 'import wx\n'), ((283, 304), 'wx.GridBagSizer', 'wx.GridBagSizer', (['(5)', '(4)'], {}), '(5, 4)\n', (298, 304), False, 'import wx\n'), ((344, 385), 'wx.StaticText', 'wx.StaticText', (['panel'], {'label': '"""Folder path"""'}), "(panel, label='Folder path')\n", (357, 385), False, 'import wx\n'), ((496, 514), 'wx.TextCtrl', 'wx.TextCtrl', (['panel'], {}), '(panel)\n', (507, 514), False, 'import wx\n'), ((688, 831), 'wx.StaticText', 'wx.StaticText', (['panel'], {'style': 'wx.TE_MULTILINE', 'label': '""" Hint: hold Option after right-click on folder to show option to copy its path."""'}), "(panel, style=wx.TE_MULTILINE, label=\n ' Hint: hold Option after right-click on folder to show option to copy its path.'\n )\n", (701, 831), False, 'import wx\n'), ((960, 1015), 'wx.Button', 'wx.Button', (['panel'], {'label': '"""Choose folder"""', 'size': '(140, 24)'}), "(panel, label='Choose folder', size=(140, 24))\n", (969, 1015), False, 'import wx\n'), ((1202, 1256), 'wx.Button', 'wx.Button', (['panel'], {'label': '"""Remove Alpha"""', 'size': '(140, 24)'}), "(panel, label='Remove Alpha', size=(140, 24))\n", (1211, 1256), False, 'import wx\n'), ((1450, 1551), 'wx.TextCtrl', 'wx.TextCtrl', (['panel', 'wx.ID_ANY'], {'style': '(wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL | wx.TE_RICH2)'}), '(panel, wx.ID_ANY, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.\n HSCROLL | wx.TE_RICH2)\n', (1461, 1551), False, 'import wx\n'), ((2172, 2268), 'wx.DirDialog', 'wx.DirDialog', (['self', '"""Choose a directory:"""'], {'style': '(wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)'}), "(self, 'Choose a directory:', style=wx.DD_DEFAULT_STYLE | wx.\n DD_DIR_MUST_EXIST)\n", (2184, 2268), False, 'import wx\n'), ((2854, 2884), 'os.path.join', 'os.path.join', (['subdir', 'filename'], {}), '(subdir, filename)\n', (2866, 2884), False, 'import os\n'), ((3173, 3236), 'subprocess.Popen', 'subprocess.Popen', (['"""ping %s"""'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "('ping %s', shell=True, stdout=subprocess.PIPE)\n", (3189, 3236), False, 'import subprocess\n'), ((3056, 3086), 'os.path.join', 'os.path.join', (['subdir', 'filename'], {}), '(subdir, filename)\n', (3068, 3086), False, 'import os\n'), ((3114, 3144), 'os.path.join', 'os.path.join', (['subdir', 'filename'], {}), '(subdir, filename)\n', (3126, 3144), False, 'import os\n')]
|
import json
import logging
import os
import sys
import boto3
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
http_pool = urllib3.PoolManager()
secretsmanager_client = boto3.client('secretsmanager')
def changeSyntheticStatus(new_status):
logger.info(f"Start changing Datadog Synthetic status to {new_status}")
try:
datadog_secret_name = os.getenv('datadogSecretName', 'Datadog_API_Key')
except:
logger.error("One of the environmet variable is missing")
raise
try:
get_secret_value_response = secretsmanager_client.get_secret_value(
SecretId=datadog_secret_name
)
if 'SecretString' in get_secret_value_response:
secret_value_str = get_secret_value_response['SecretString']
else:
logger.error(
f"Could not extract secret {datadog_secret_name} from Secrets Manager")
raise
secret_value = json.loads(secret_value_str)
dd_api_key = secret_value['datadog']['api_key']
dd_app_key = secret_value['datadog']['app_key']
except:
logger.error(
"There was an error while getting the parameter from the parameter store")
raise
synthetic_public_id = os.getenv('syntheticPublicId')
datadog_api_endpoint = os.getenv('datadogApiEndpoint')
datadog_endpoint_url = datadog_api_endpoint + \
'synthetics/tests/' + synthetic_public_id + '/status'
logger.info(
f"Changing status to {new_status} for Datadog Synthetic with ID {synthetic_public_id} against endpoint {datadog_endpoint_url}")
body_json = json.dumps({
"new_status": new_status,
})
put_response = http_pool.request('PUT', datadog_endpoint_url,
headers={
'Content-Type': 'application/json',
'DD-API-KEY': dd_api_key,
'DD-APPLICATION-KEY': dd_app_key
},
body=body_json)
if (put_response.status) != 200:
logger.error(
f"HTTP Call to change the status of Datadog Synthetic {synthetic_public_id} to {new_status} failed.")
logger.error(f"HTTP status is {put_response.status}")
raise
else:
decoded_response = json.loads(put_response.data.decode('utf-8'))
if decoded_response: # HTTP response is either true or false
logger.info(
f"Status of Datadog Synthetic {synthetic_public_id} was successfully changed to {new_status}")
else:
logger.error(
f"HTTP Call was successfull but the status of Datadog Synthetic {synthetic_public_id} was NOT changed to {new_status}. Response was {decoded_response}")
raise
def handler(event, context):
logger.info("Start with Datadog Synthetic Scheduler")
try:
synthetic_set_status = event['syntheticSetStatus']
except:
logger.error("Could not extract Synthetic destination status from event")
raise
changeSyntheticStatus(synthetic_set_status)
logger.info("End of Datadog Synthetic Scheduler")
if __name__ == "__main__":
handler(0, 0)
|
[
"json.loads",
"boto3.client",
"json.dumps",
"urllib3.disable_warnings",
"urllib3.PoolManager",
"os.getenv",
"logging.getLogger"
] |
[((78, 104), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (102, 104), False, 'import urllib3\n'), ((115, 134), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (132, 134), False, 'import logging\n'), ((178, 199), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (197, 199), False, 'import urllib3\n'), ((225, 255), 'boto3.client', 'boto3.client', (['"""secretsmanager"""'], {}), "('secretsmanager')\n", (237, 255), False, 'import boto3\n'), ((1291, 1321), 'os.getenv', 'os.getenv', (['"""syntheticPublicId"""'], {}), "('syntheticPublicId')\n", (1300, 1321), False, 'import os\n'), ((1349, 1380), 'os.getenv', 'os.getenv', (['"""datadogApiEndpoint"""'], {}), "('datadogApiEndpoint')\n", (1358, 1380), False, 'import os\n'), ((1667, 1705), 'json.dumps', 'json.dumps', (["{'new_status': new_status}"], {}), "({'new_status': new_status})\n", (1677, 1705), False, 'import json\n'), ((412, 461), 'os.getenv', 'os.getenv', (['"""datadogSecretName"""', '"""Datadog_API_Key"""'], {}), "('datadogSecretName', 'Datadog_API_Key')\n", (421, 461), False, 'import os\n'), ((988, 1016), 'json.loads', 'json.loads', (['secret_value_str'], {}), '(secret_value_str)\n', (998, 1016), False, 'import json\n')]
|
# Generated by Django 2.2.5 on 2019-12-23 06:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20191222_2318'),
]
operations = [
migrations.AlterField(
model_name='order',
name='order_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='注文日時'),
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product', models.CharField(max_length=250)),
('quantity', models.IntegerField()),
('price', models.IntegerField()),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Order')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((371, 431), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""注文日時"""'}), "(auto_now_add=True, verbose_name='注文日時')\n", (391, 431), False, 'from django.db import migrations, models\n'), ((550, 643), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (566, 643), False, 'from django.db import migrations, models\n'), ((670, 702), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (686, 702), False, 'from django.db import migrations, models\n'), ((734, 755), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (753, 755), False, 'from django.db import migrations, models\n'), ((784, 805), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (803, 805), False, 'from django.db import migrations, models\n'), ((834, 920), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""orders.Order"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'orders.Order')\n", (851, 920), False, 'from django.db import migrations, models\n')]
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for aws_encryption_sdk.internal.arn functions."""
import pytest
from aws_encryption_sdk.exceptions import MalformedArnError
from aws_encryption_sdk.internal.arn import Arn
class TestArn(object):
def test_malformed_arn_missing_arn(self):
arn = "aws:kms:us-east-1:222222222222:key/aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_service(self):
arn = "aws:us-east-1:222222222222:key/aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_region(self):
arn = "arn:aws:222222222222:key/aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_account(self):
arn = "arn:aws:us-east-1:key/aaaaaaaa-1111-2222-<KEY>"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_malformed_arn_missing_resource_type(self):
arn = "arn:aws:us-east-1:222222222222"
with pytest.raises(MalformedArnError) as excinfo:
Arn.from_str(arn)
excinfo.match("Resource {} could not be parsed as an ARN".format(arn))
def test_parse_key_arn_success(self):
arn_str = "arn:aws:kms:us-east-1:222222222222:key/aaaaaaaa-1111-2222-3333-<KEY>"
arn = Arn.from_str(arn_str)
assert arn.partition == "aws"
assert arn.service == "kms"
assert arn.region == "us-east-1"
assert arn.account_id == "222222222222"
assert arn.resource_type == "key"
assert arn.resource_id == "aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb"
def test_parse_alias_arn_success(self):
arn_str = "arn:aws:kms:us-east-1:222222222222:alias/aws/service"
arn = Arn.from_str(arn_str)
assert arn.partition == "aws"
assert arn.service == "kms"
assert arn.region == "us-east-1"
assert arn.account_id == "222222222222"
assert arn.resource_type == "alias"
assert arn.resource_id == "aws/service"
|
[
"aws_encryption_sdk.internal.arn.Arn.from_str",
"pytest.raises"
] |
[((2380, 2401), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn_str'], {}), '(arn_str)\n', (2392, 2401), False, 'from aws_encryption_sdk.internal.arn import Arn\n'), ((2814, 2835), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn_str'], {}), '(arn_str)\n', (2826, 2835), False, 'from aws_encryption_sdk.internal.arn import Arn\n'), ((926, 958), 'pytest.raises', 'pytest.raises', (['MalformedArnError'], {}), '(MalformedArnError)\n', (939, 958), False, 'import pytest\n'), ((983, 1000), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn'], {}), '(arn)\n', (995, 1000), False, 'from aws_encryption_sdk.internal.arn import Arn\n'), ((1229, 1261), 'pytest.raises', 'pytest.raises', (['MalformedArnError'], {}), '(MalformedArnError)\n', (1242, 1261), False, 'import pytest\n'), ((1286, 1303), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn'], {}), '(arn)\n', (1298, 1303), False, 'from aws_encryption_sdk.internal.arn import Arn\n'), ((1525, 1557), 'pytest.raises', 'pytest.raises', (['MalformedArnError'], {}), '(MalformedArnError)\n', (1538, 1557), False, 'import pytest\n'), ((1582, 1599), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn'], {}), '(arn)\n', (1594, 1599), False, 'from aws_encryption_sdk.internal.arn import Arn\n'), ((1807, 1839), 'pytest.raises', 'pytest.raises', (['MalformedArnError'], {}), '(MalformedArnError)\n', (1820, 1839), False, 'import pytest\n'), ((1864, 1881), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn'], {}), '(arn)\n', (1876, 1881), False, 'from aws_encryption_sdk.internal.arn import Arn\n'), ((2079, 2111), 'pytest.raises', 'pytest.raises', (['MalformedArnError'], {}), '(MalformedArnError)\n', (2092, 2111), False, 'import pytest\n'), ((2136, 2153), 'aws_encryption_sdk.internal.arn.Arn.from_str', 'Arn.from_str', (['arn'], {}), '(arn)\n', (2148, 2153), False, 'from aws_encryption_sdk.internal.arn import Arn\n')]
|
from django.db import models
# Create your models here.
class Student(models.Model):
    name = models.CharField(max_length = 20, null = False) # string field, max length 20; the field may not be blank
sex = models.CharField(max_length = 2, default = 'M', null = False)
birthday = models.DateField(null = False)
phone = models.CharField(max_length = 20, null = False)
class Meta:
db_table = "student"
|
[
"django.db.models.CharField",
"django.db.models.DateField"
] |
[((97, 140), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(False)'}), '(max_length=20, null=False)\n', (113, 140), False, 'from django.db import models\n'), ((176, 231), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'default': '"""M"""', 'null': '(False)'}), "(max_length=2, default='M', null=False)\n", (192, 231), False, 'from django.db import models\n'), ((253, 281), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(False)'}), '(null=False)\n', (269, 281), False, 'from django.db import models\n'), ((296, 339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(False)'}), '(max_length=20, null=False)\n', (312, 339), False, 'from django.db import models\n')]
|
#!/usr/bin/env python
'''This module contains routines to perform Gram-Schmidt orthonormalization on
a sequence of vectors.
'''
import numpy as np
import numpy.linalg as la
def gso(A, overwrite=False, out=None):
'''Performs Gram-Schmidt orthonormalization on a sequence of vectors.
Parameters
----------
A : ndarray
(M x N) ndarray with M <= N. The rows of A contain the sequence of
vectors.
overwrite : bool, optional
If `True`, the matrix A is overwritten.
out : ndarray, optional
(M x N) ndarray with M <= N. The rows of `out` contain the sequence of
orthonormal vectors. If `overwrite = True`, `out` is neglected.
Returns
-------
output : ndarray
(M x N) ndarray with M <= N. The rows of `out` contain the sequence of
orthonormal vectors.
Notes
-----
See <NAME> <NAME>, Matrix Computations, 3rd edition, Section 5.2.8,
Algorithm 5.2.5, p. 231.
'''
assert A.shape[0] <= A.shape[1]
M = A.shape[0]
if overwrite:
output = A
else:
if out is not None:
output = out
else:
output = np.zeros_like(A)
output[:,:] = A
for i in range(M):
output[i,:] = output[i,:]/la.norm(output[i,:])
for j in range(i+1, M):
output[j,:] = output[j,:] - np.dot(output[j,:], output[i,:])*output[i,:]
return output
if __name__ == '__main__':
A = np.random.random((6,6))
print('A')
print(A)
out = gso(A)
print('\n')
print(out)
print('\n')
print(np.dot(out.T, out))
for i in range(A.shape[0]):
for j in range(A.shape[0]):
print(i, j, np.dot(out[i,:], out[j,:]))
print('\n')
|
[
"numpy.dot",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.zeros_like"
] |
[((1456, 1480), 'numpy.random.random', 'np.random.random', (['(6, 6)'], {}), '((6, 6))\n', (1472, 1480), True, 'import numpy as np\n'), ((1582, 1600), 'numpy.dot', 'np.dot', (['out.T', 'out'], {}), '(out.T, out)\n', (1588, 1600), True, 'import numpy as np\n'), ((1161, 1177), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (1174, 1177), True, 'import numpy as np\n'), ((1263, 1284), 'numpy.linalg.norm', 'la.norm', (['output[i, :]'], {}), '(output[i, :])\n', (1270, 1284), True, 'import numpy.linalg as la\n'), ((1694, 1722), 'numpy.dot', 'np.dot', (['out[i, :]', 'out[j, :]'], {}), '(out[i, :], out[j, :])\n', (1700, 1722), True, 'import numpy as np\n'), ((1356, 1390), 'numpy.dot', 'np.dot', (['output[j, :]', 'output[i, :]'], {}), '(output[j, :], output[i, :])\n', (1362, 1390), True, 'import numpy as np\n')]
|
import os
from . import Builder, BuilderOptions
from .. import types
from ..environment import get_cmd
from ..freezedried import FreezeDried
from ..log import LogFile
from ..path import pushd
from ..shell import ShellArguments
_known_install_types = ('prefix', 'exec-prefix', 'bindir', 'libdir',
'includedir')
@FreezeDried.fields(rehydrate={'extra_args': ShellArguments})
class Bfg9000Builder(Builder):
type = 'bfg9000'
_version = 1
_path_bases = ('srcdir', 'builddir')
class Options(BuilderOptions):
type = 'bfg9000'
_version = 1
@staticmethod
def upgrade(config, version):
return config
def __init__(self):
self.toolchain = types.Unset
def __call__(self, *, toolchain=types.Unset, config_file,
_symbols, _child_config=False):
if not _child_config and self.toolchain is types.Unset:
T = types.TypeCheck(locals(), _symbols)
config_dir = os.path.dirname(config_file)
T.toolchain(types.maybe_raw(types.path_string(config_dir)))
@staticmethod
def upgrade(config, version):
return config
def __init__(self, name, *, extra_args=None, submodules, **kwargs):
super().__init__(name, **kwargs)
T = types.TypeCheck(locals(), self._expr_symbols)
T.extra_args(types.shell_args(none_ok=True))
def set_usage(self, usage=None, **kwargs):
if usage is None:
usage = 'pkg_config'
super().set_usage(usage, **kwargs)
def _toolchain_args(self, toolchain):
return ['--toolchain', toolchain] if toolchain else []
def _install_args(self, deploy_paths):
args = []
for k, v in deploy_paths.items():
if k in _known_install_types:
args.extend(['--' + k, v])
return args
def build(self, pkgdir, srcdir):
builddir = self._builddir(pkgdir)
bfg9000 = get_cmd(self._common_options.env, 'BFG9000', 'bfg9000')
ninja = get_cmd(self._common_options.env, 'NINJA', 'ninja')
with LogFile.open(pkgdir, self.name) as logfile:
with pushd(srcdir):
logfile.check_call(
bfg9000 + ['configure', builddir] +
self._toolchain_args(self._this_options.toolchain) +
self._install_args(self._common_options.deploy_paths) +
self.extra_args.fill(srcdir=srcdir, builddir=builddir)
)
with pushd(builddir):
logfile.check_call(ninja)
def deploy(self, pkgdir, srcdir):
ninja = get_cmd(self._common_options.env, 'NINJA', 'ninja')
with LogFile.open(pkgdir, self.name, kind='deploy') as logfile:
with pushd(self._builddir(pkgdir)):
logfile.check_call(ninja + ['install'])
|
[
"os.path.dirname"
] |
[((1022, 1050), 'os.path.dirname', 'os.path.dirname', (['config_file'], {}), '(config_file)\n', (1037, 1050), False, 'import os\n')]
|
from __future__ import print_function
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.tests.test_var import TestVARResults
test_VAR = TestVARResults()
test_VAR.test_reorder()
|
[
"statsmodels.tsa.vector_ar.tests.test_var.TestVARResults"
] |
[((148, 164), 'statsmodels.tsa.vector_ar.tests.test_var.TestVARResults', 'TestVARResults', ([], {}), '()\n', (162, 164), False, 'from statsmodels.tsa.vector_ar.tests.test_var import TestVARResults\n')]
|