code (stringlengths 22–1.05M) | apis (listlengths 1–3.31k) | extract_api (stringlengths 75–3.25M) |
---|---|---|
import unittest
from time import time
from pickle import load, dump
from tempfile import mkstemp
from random import choice, randint
from string import ascii_letters
from numpy import corrcoef, random, abs, max, asarray, round, zeros_like
from trlda.models import BatchLDA
from trlda.utils import sample_dirichlet
class Tests(unittest.TestCase):
def test_basics(self):
W = 102
D = 1010
K = 11
alpha = .27
eta = 3.1
model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta)
self.assertEqual(K, model.num_topics)
self.assertEqual(K, model.alpha.size)
self.assertEqual(W, model.num_words)
self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)])
self.assertEqual(eta, model.eta)
with self.assertRaises(RuntimeError):
model.alpha = random.rand(K + 1)
alpha = random.rand(K, 1)
model.alpha = alpha
self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20)
def test_empirical_bayes_alpha(self):
model = BatchLDA(
num_words=4,
num_topics=2,
alpha=[.2, .05],
eta=.2)
model.lambdas = [
[100, 100, 1e-16, 1e-16],
[1e-16, 1e-16, 100, 100]]
documents = model.sample(num_documents=100, length=20)
# set alpha to wrong values
model.alpha = [4., 4.]
model.update_parameters(documents,
max_epochs=10,
max_iter_inference=200,
update_lambda=False,
update_alpha=True,
emp_bayes_threshold=0.)
# make sure empirical Bayes went in the right direction
self.assertGreater(model.alpha[0], model.alpha[1])
self.assertLess(model.alpha[0], 4.)
self.assertLess(model.alpha[1], 4.)
def test_empirical_bayes_eta(self):
for eta, initial_eta in [(.045, .2), (.41, .2)]:
model = BatchLDA(
num_words=100,
num_topics=10,
alpha=[.1, .1],
eta=initial_eta)
# this will sample a beta with the given eta
model.lambdas = zeros_like(model.lambdas) + eta
documents = model.sample(500, 10)
model.update_parameters(documents,
max_epochs=10,
update_eta=True,
emp_bayes_threshold=0.)
# the optimization should at least move in the right direction and not explode
self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta))
def test_pickle(self):
model0 = BatchLDA(
num_words=300,
num_topics=50,
alpha=random.rand(),
eta=random.rand())
tmp_file = mkstemp()[1]
# save model
with open(tmp_file, 'wb') as handle:
dump({'model': model0}, handle)
# load model
with open(tmp_file, 'rb') as handle:
model1 = load(handle)['model']
# make sure parameters haven't changed
self.assertEqual(model0.num_words, model1.num_words)
self.assertEqual(model0.num_topics, model1.num_topics)
self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20)
self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20)
self.assertLess(abs(model0.eta - model1.eta), 1e-20)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"trlda.models.BatchLDA",
"pickle.dump",
"numpy.zeros_like",
"numpy.abs",
"random.randint",
"tempfile.mkstemp",
"pickle.load",
"numpy.random.rand"
] |
[((2869, 2884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2882, 2884), False, 'import unittest\n'), ((438, 495), 'trlda.models.BatchLDA', 'BatchLDA', ([], {'num_words': 'W', 'num_topics': 'K', 'alpha': 'alpha', 'eta': 'eta'}), '(num_words=W, num_topics=K, alpha=alpha, eta=eta)\n', (446, 495), False, 'from trlda.models import BatchLDA\n'), ((805, 822), 'numpy.random.rand', 'random.rand', (['K', '(1)'], {}), '(K, 1)\n', (816, 822), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((969, 1032), 'trlda.models.BatchLDA', 'BatchLDA', ([], {'num_words': '(4)', 'num_topics': '(2)', 'alpha': '[0.2, 0.05]', 'eta': '(0.2)'}), '(num_words=4, num_topics=2, alpha=[0.2, 0.05], eta=0.2)\n', (977, 1032), False, 'from trlda.models import BatchLDA\n'), ((775, 793), 'numpy.random.rand', 'random.rand', (['(K + 1)'], {}), '(K + 1)\n', (786, 793), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((1682, 1755), 'trlda.models.BatchLDA', 'BatchLDA', ([], {'num_words': '(100)', 'num_topics': '(10)', 'alpha': '[0.1, 0.1]', 'eta': 'initial_eta'}), '(num_words=100, num_topics=10, alpha=[0.1, 0.1], eta=initial_eta)\n', (1690, 1755), False, 'from trlda.models import BatchLDA\n'), ((2312, 2321), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (2319, 2321), False, 'from tempfile import mkstemp\n'), ((2382, 2413), 'pickle.dump', 'dump', (["{'model': model0}", 'handle'], {}), "({'model': model0}, handle)\n", (2386, 2413), False, 'from pickle import load, dump\n'), ((2801, 2829), 'numpy.abs', 'abs', (['(model0.eta - model1.eta)'], {}), '(model0.eta - model1.eta)\n', (2804, 2829), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((662, 679), 'random.randint', 'randint', (['(0)', '(K - 1)'], {}), '(0, K - 1)\n', (669, 679), False, 'from random import choice, randint\n'), ((1839, 1864), 'numpy.zeros_like', 'zeros_like', (['model.lambdas'], {}), '(model.lambdas)\n', (1849, 1864), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2116, 2136), 'numpy.abs', 'abs', (['(model.eta - eta)'], {}), '(model.eta - eta)\n', (2119, 2136), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2138, 2166), 'numpy.abs', 'abs', (['(model.eta - initial_eta)'], {}), '(model.eta - initial_eta)\n', (2141, 2166), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2261, 2274), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (2272, 2274), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2283, 2296), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (2294, 2296), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2475, 2487), 'pickle.load', 'load', (['handle'], {}), '(handle)\n', (2479, 2487), False, 'from pickle import load, dump\n'), ((2673, 2709), 'numpy.abs', 'abs', (['(model0.lambdas - model1.lambdas)'], {}), '(model0.lambdas - model1.lambdas)\n', (2676, 2709), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2741, 2773), 'numpy.abs', 'abs', (['(model0.alpha - model1.alpha)'], {}), '(model0.alpha - model1.alpha)\n', (2744, 2773), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n')]
|
from django.test import TestCase
from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data
class TestPPDData(TestCase):
def test_get_ppr_data_comparative_month(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
5,
2,
'month',
'comparative',
{
'aggregation_level': 2,
'state_id': 'st1',
},
False
)
expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1',
'value': '142.40%'}],
'Worst performers': [{'place': 'd1',
'value': '142.40%'}],
'indicator': 'AWC Open'},
{'Best performers': [{'place': 'd1', 'value': '1.62%'}],
'Worst performers': [{'place': 'd1',
'value': '1.62%'}],
'indicator': 'Home Visits'}]],
'Service Delivery': [
[{'Best performers': [{'place': 'd1', 'value': '1.45%'}],
'Worst performers': [{'place': 'd1', 'value': '1.45%'}],
'indicator': 'Pre-school Education'},
{'Best performers': [{'place': 'd1', 'value': '66.74%'}],
'Worst performers': [{'place': 'd1',
'value': '66.74%'}],
'indicator': 'Weighing efficiency'}],
[{'Best performers': [{'place': 'd1', 'value': '1.47%'}],
'Worst performers': [{'place': 'd1', 'value': '1.47%'}],
'indicator': 'Height Measurement Efficiency'},
{'Best performers': [{'place': 'd1', 'value': '72.97%'}],
'Worst performers': [{'place': 'd1',
'value': '72.97%'}],
'indicator': 'Counselling'}],
[{'Best performers': [{'place': 'd1', 'value': '28.67%'}],
'Worst performers': [{'place': 'd1',
'value': '28.67%'}],
'indicator': 'Take Home Ration'},
{'Best performers': [{'place': 'd1', 'value': '0.83%'}],
'Worst performers': [{'place': 'd1', 'value': '0.83%'}],
'indicator': 'Supplementary Nutrition'}]]}
self.assertDictEqual(expected, data)
def test_get_ppr_data_comparative_quarter(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
None,
2,
'quarter',
'comparative',
{
'aggregation_level': 1,
},
False
)
expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1',
'value': '64.80%'},
{'place': 'st2',
'value': '47.76%'},
{'place': 'st7',
'value': '0.00%'}],
'Worst performers': [{'place': 'st7',
'value': '0.00%'},
{'place': 'st2',
'value': '47.76%'},
{'place': 'st1',
'value': '64.80%'}],
'indicator': 'AWC Open'},
{'Best performers': [{'place': 'st1', 'value': '0.66%'},
{'place': 'st2', 'value': '0.00%'},
{'place': 'st7',
'value': '0.00%'}],
'Worst performers': [{'place': 'st7',
'value': '0.00%'},
{'place': 'st2',
'value': '0.00%'},
{'place': 'st1',
'value': '0.66%'}],
'indicator': 'Home Visits'}]],
'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'},
{'place': 'st1', 'value': '2.52%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '2.52%'},
{'place': 'st2',
'value': '8.41%'}],
'indicator': 'Pre-school Education'},
{'Best performers': [{'place': 'st2', 'value': '70.40%'},
{'place': 'st1', 'value': '67.39%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '67.39%'},
{'place': 'st2',
'value': '70.40%'}],
'indicator': 'Weighing efficiency'}],
[{'Best performers': [{'place': 'st2', 'value': '2.89%'},
{'place': 'st1', 'value': '1.44%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '1.44%'},
{'place': 'st2',
'value': '2.89%'}],
'indicator': 'Height Measurement Efficiency'},
{'Best performers': [{'place': 'st1', 'value': '60.32%'},
{'place': 'st2', 'value': '57.97%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st2', 'value': '57.97%'},
{'place': 'st1',
'value': '60.32%'}],
'indicator': 'Counselling'}],
[{'Best performers': [{'place': 'st2', 'value': '34.75%'},
{'place': 'st1', 'value': '14.60%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '14.60%'},
{'place': 'st2',
'value': '34.75%'}],
'indicator': 'Take Home Ration'},
{'Best performers': [{'place': 'st2', 'value': '1.10%'},
{'place': 'st1', 'value': '0.95%'},
{'place': 'st7', 'value': '0.00%'}],
'Worst performers': [{'place': 'st7', 'value': '0.00%'},
{'place': 'st1', 'value': '0.95%'},
{'place': 'st2',
'value': '1.10%'}],
'indicator': 'Supplementary Nutrition'}]]}
self.assertDictEqual(expected, data)
def test_get_ppr_data_aggregated_month(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
5,
2,
'month',
'aggregated',
{
'aggregation_level': 1,
},
False
)
expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '118.18%',
'% of Home Visits': '0.79%',
'Number of AWCs Launched': 22,
'Number of Blocks Covered': 5,
'Number of Districts Covered': 4,
'Number of States Covered': 3},
'Service Delivery': {
'% of children between 3-6 years provided PSE for atleast 21+ days': '6.66%',
'% of children between 3-6 years provided SNP for atleast 21+ days': '1.51%',
'% of children between 6 months -3 years, P&LW provided THR for atleast 21+ days': '43.65%',
'% of trimester three women counselled on immediate and EBF': '72.15%',
'Height Measurement Efficiency': '3.24%',
'Weighing efficiency': '70.27%'}}
self.assertDictEqual(expected, data)
def test_get_ppr_data_aggregated_quarter(self):
self.maxDiff = None
data = get_poshan_progress_dashboard_data(
'icds-cas',
2017,
None,
2,
'quarter',
'aggregated',
{
'aggregation_level': 1,
},
False
)
expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '53.27%',
'% of Home Visits': '0.32%',
'Number of AWCs Launched': 22,
'Number of Blocks Covered': 5,
'Number of Districts Covered': 4,
'Number of States Covered': 3},
'Service Delivery': {
'% of children between 3-6 years provided PSE for atleast 21+ days': '5.54%',
'% of children between 3-6 years provided SNP for atleast 21+ days': '1.08%',
'% of children between 6 months -3 years, P&LW provided THR for atleast 21+ days': '25.32%',
'% of trimester three women counselled on immediate and EBF': '59.09%',
'Height Measurement Efficiency': '2.24%',
'Weighing efficiency': '68.81%'}}
self.assertDictEqual(expected, data)
|
[
"custom.icds_reports.reports.poshan_progress_dashboard_data.get_poshan_progress_dashboard_data"
] |
[((266, 404), 'custom.icds_reports.reports.poshan_progress_dashboard_data.get_poshan_progress_dashboard_data', 'get_poshan_progress_dashboard_data', (['"""icds-cas"""', '(2017)', '(5)', '(2)', '"""month"""', '"""comparative"""', "{'aggregation_level': 2, 'state_id': 'st1'}", '(False)'], {}), "('icds-cas', 2017, 5, 2, 'month',\n 'comparative', {'aggregation_level': 2, 'state_id': 'st1'}, False)\n", (300, 404), False, 'from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data\n'), ((3012, 3136), 'custom.icds_reports.reports.poshan_progress_dashboard_data.get_poshan_progress_dashboard_data', 'get_poshan_progress_dashboard_data', (['"""icds-cas"""', '(2017)', 'None', '(2)', '"""quarter"""', '"""comparative"""', "{'aggregation_level': 1}", '(False)'], {}), "('icds-cas', 2017, None, 2, 'quarter',\n 'comparative', {'aggregation_level': 1}, False)\n", (3046, 3136), False, 'from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data\n'), ((9909, 10027), 'custom.icds_reports.reports.poshan_progress_dashboard_data.get_poshan_progress_dashboard_data', 'get_poshan_progress_dashboard_data', (['"""icds-cas"""', '(2017)', '(5)', '(2)', '"""month"""', '"""aggregated"""', "{'aggregation_level': 1}", '(False)'], {}), "('icds-cas', 2017, 5, 2, 'month',\n 'aggregated', {'aggregation_level': 1}, False)\n", (9943, 10027), False, 'from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data\n'), ((11341, 11464), 'custom.icds_reports.reports.poshan_progress_dashboard_data.get_poshan_progress_dashboard_data', 'get_poshan_progress_dashboard_data', (['"""icds-cas"""', '(2017)', 'None', '(2)', '"""quarter"""', '"""aggregated"""', "{'aggregation_level': 1}", '(False)'], {}), "('icds-cas', 2017, None, 2, 'quarter',\n 'aggregated', {'aggregation_level': 1}, False)\n", (11375, 11464), False, 'from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data\n')]
|
#!/usr/bin/env python
"""
This example illustrates embedding a visvis figure in an FLTK application.
"""
import fltk
import visvis as vv
# Create a visvis app instance, which wraps an fltk application object.
# This needs to be done *before* instantiating the main window.
app = vv.use('fltk')
class MainWindow(fltk.Fl_Window):
def __init__(self):
fltk.Fl_Window.__init__(self, 560, 420, "Embedding in FLTK")
# Make a panel with a button
but = fltk.Fl_Button(10,10,70,30, 'Click me')
but.callback(self._Plot)
# Make figure to draw stuff in
self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, "")
# Make box for resizing
box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,"")
self.resizable(box)
box.hide()
# Finish
self.end()
self.show()
self.fig._widget.show()
def _Plot(self, event):
# Make sure our figure is the active one
# If only one figure, this is not necessary.
#vv.figure(self.fig.nr)
# Clear it
vv.clf()
# Plot
vv.plot([1,2,3,1,6])
vv.legend(['this is a line'])
# Two ways to create the application and start the main loop
if True:
# The visvis way. Will run in interactive mode when used in IEP or IPython.
app.Create()
m = MainWindow()
app.Run()
else:
# The native way.
m = MainWindow()
fltk.Fl.run()
|
[
"fltk.Fl.run",
"visvis.use",
"fltk.Fl_Window.__init__",
"visvis.clf",
"visvis.plot",
"fltk.Fl_Button",
"fltk.Fl_Box",
"visvis.backends.backend_fltk.Figure",
"visvis.legend"
] |
[((283, 297), 'visvis.use', 'vv.use', (['"""fltk"""'], {}), "('fltk')\n", (289, 297), True, 'import visvis as vv\n'), ((1522, 1535), 'fltk.Fl.run', 'fltk.Fl.run', ([], {}), '()\n', (1533, 1535), False, 'import fltk\n'), ((366, 426), 'fltk.Fl_Window.__init__', 'fltk.Fl_Window.__init__', (['self', '(560)', '(420)', '"""Embedding in FLTK"""'], {}), "(self, 560, 420, 'Embedding in FLTK')\n", (389, 426), False, 'import fltk\n'), ((487, 529), 'fltk.Fl_Button', 'fltk.Fl_Button', (['(10)', '(10)', '(70)', '(30)', '"""Click me"""'], {}), "(10, 10, 70, 30, 'Click me')\n", (501, 529), False, 'import fltk\n'), ((627, 692), 'visvis.backends.backend_fltk.Figure', 'vv.backends.backend_fltk.Figure', (['(100)', '(10)', '(560 - 110)', '(420 - 20)', '""""""'], {}), "(100, 10, 560 - 110, 420 - 20, '')\n", (658, 692), True, 'import visvis as vv\n'), ((741, 802), 'fltk.Fl_Box', 'fltk.Fl_Box', (['fltk.FL_NO_BOX', '(100)', '(50)', '(560 - 110)', '(420 - 60)', '""""""'], {}), "(fltk.FL_NO_BOX, 100, 50, 560 - 110, 420 - 60, '')\n", (752, 802), False, 'import fltk\n'), ((1156, 1164), 'visvis.clf', 'vv.clf', ([], {}), '()\n', (1162, 1164), True, 'import visvis as vv\n'), ((1197, 1221), 'visvis.plot', 'vv.plot', (['[1, 2, 3, 1, 6]'], {}), '([1, 2, 3, 1, 6])\n', (1204, 1221), True, 'import visvis as vv\n'), ((1226, 1255), 'visvis.legend', 'vv.legend', (["['this is a line']"], {}), "(['this is a line'])\n", (1235, 1255), True, 'import visvis as vv\n')]
|
#!/usr/bin/env python
import math
import numpy as np
from . import linalg as la
from . import eulang
#Euler angle sequence: XYZ (world). First rotation about X, second rotation
#about Y, and the third rotation about Z axis of the world (i.e. fixed) frame.
#This is the same as the sequence used in Blender.
#In contrast, the XYZ sequence is understood in the Aerospace community as:
#First rotation about Z-axis, second rotation about Y-axis, and the third
#rotation about X-axis of the body frame.
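#Illustration (assuming Rx, Ry, Rz denote the elementary rotation matrices
#about the world axes): for world-frame XYZ angles (a, b, c), the combined
#rotation acting on a column vector is R = Rz(c) @ Ry(b) @ Rx(a), i.e. the
#rotation about X is applied first.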
#Axis_angle------------------------------------------------------------
def fix_axis_angle(axis, angle, normalize=True):
if normalize:
norm = np.linalg.norm(axis)
if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14):
axis /= norm
angle = math.fmod(angle, 2*math.pi)
if angle < 0.0:
angle = -angle
axis = -axis
if angle > math.pi:
angle = 2*math.pi - angle
axis = -axis
return (axis, angle)
def get_rand_axis_angle():
'''
Generates a random pair of axis-angle. The axis is a random vector from
the surface of a unit sphere. Algorithm from Allen & Tildesley p. 349.
'''
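#The loop below is rejection sampling (Marsaglia's method): (zeta1, zeta2) is
#drawn uniformly from the unit disc and then mapped onto the unit sphere,
#which yields a uniformly distributed axis direction.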
axis = np.zeros((3,))
#Generate angle: A uniform random number from [0.0, 2*pi)
angle = 2.0*math.pi*np.random.random()
while True:
#Generate two uniform random numbers from [-1, 1)
zeta1 = 2.0*np.random.random() - 1.0
zeta2 = 2.0*np.random.random() - 1.0
zetasq = zeta1**2 + zeta2**2
if zetasq <= 1.0:
break
rt = np.sqrt(1.0-zetasq)
axis[0] = 2.0*zeta1*rt
axis[1] = 2.0*zeta2*rt
axis[2] = 1.0 - 2.0*zetasq
return fix_axis_angle(axis, angle)
def axis_angle_to_quat(axis, angle):
w = math.cos(angle/2)
v = math.sin(angle/2)*axis
q = np.array([w, v[0], v[1], v[2]])
return normalize_quat(q)
def axis_angle_to_euler(axis, angle, seq='XYZ', world=True):
rotmat = get_rotmat_axis_angle(axis, angle)
euler = factorize_rotmat(rotmat, seq=seq, world=world)
return euler
def axis_angle_to_dcm(axis, angle):
dcm = get_shiftmat_axis_angle(axis, angle, forward=True)
return dcm
def any_to_axis_angle(orientation):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
axis, angle = quat_to_axis_angle(quat)
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
axis, angle = euler_to_axis_angle(euler, seq=seq, world=world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
elif ori_repr == 'dcm':
axis, angle = dcm_to_axis_angle(orientation['dcm'])
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return axis, angle
def rotate_vector_axis_angle(v, axis, angle):
'''
Rotates vectors about axis by angle.
'''
rotmat = get_rotmat_axis_angle(axis, angle)
return np.dot(v, rotmat.T)
def get_rotmat_axis_angle(axis, angle):
R = np.zeros((3,3))
sin = np.sin(angle)
cos = np.cos(angle)
icos = 1.0 - cos
R[0,0] = axis[0]*axis[0]*icos + cos
R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin
R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin
R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin
R[1,1] = axis[1]*axis[1]*icos + cos
R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin
R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin
R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin
R[2,2] = axis[2]*axis[2]*icos + cos
return R
def extract_axis_angle_from_rotmat(rotmat):
trace = np.trace(rotmat)
angle = math.acos((trace-1)/2)
if angle > 0:
if angle < math.pi:
u0 = rotmat[2,1] - rotmat[1,2]
u1 = rotmat[0,2] - rotmat[2,0]
u2 = rotmat[1,0] - rotmat[0,1]
else:
#Find the largest entry in the diagonal of rotmat
k = np.argmax(np.diag(rotmat))
if k == 0:
u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2
s = 1.0/(2*u0)
u1 = s*rotmat[0,1]
u2 = s*rotmat[0,2]
elif k == 1:
u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2
s = 1.0/(2*u1)
u0 = s*rotmat[0,1]
u2 = s*rotmat[1,2]
elif k == 2:
u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2
s = 1.0/(2*u2)
u0 = s*rotmat[0,2]
u1 = s*rotmat[1,2]
else:
u0 = 1.0
u1 = 0.0
u2 = 0.0
return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True)
def shift_vector_axis_angle(v, axis, angle, forward=False):
shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_axis_angle(a, axis, angle, forward=False):
shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_axis_angle(a, axis, angle, forward=False):
shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_axis_angle(axis, angle, forward=False):
shiftmat = get_rotmat_axis_angle(-axis, angle)
if not forward:
shiftmat = shiftmat.T
return shiftmat
#Direction cosine matrix-----------------------------------------------
def dcm_from_axes(A, B):
'''
Returns the direction cosine matrix of axes(i.e. frame) B w.r.t.
axes(i.e. frame) A.
Parameters
----------
A : (3,3) ndarray
The rows of A represent the orthonormal basis vectors of frame A.
B : (3,3) ndarray
The rows of B represent the orthonormal basis vectors of frame B.
Returns
-------
(3,3) ndarray
The dcm of frame B w.r.t. frame A.
'''
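#For example (illustrative note): if A is the identity basis np.eye(3), the
#returned dcm is simply B itself.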
return np.dot(B, A.T)
def dcm_to_quat(dcm):
mat = get_rotmat_dcm(dcm)
axis, angle = extract_axis_angle_from_rotmat(mat)
return axis_angle_to_quat(axis, angle)
def dcm_to_euler(dcm, seq='XYZ', world=True):
mat = get_rotmat_dcm(dcm)
euler = factorize_rotmat(mat, seq=seq, world=world)
return euler
def dcm_to_axis_angle(dcm):
mat = get_rotmat_dcm(dcm)
axis, angle = extract_axis_angle_from_rotmat(mat)
return (axis, angle)
def any_to_dcm(orientation):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
dcm = quat_to_dcm(quat)
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
dcm = euler_to_dcm(euler, seq=seq, world=world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
dcm = axis_angle_to_dcm(axis, angle)
elif ori_repr == 'dcm':
dcm = np.array(orientation['dcm'])
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return dcm
def rotate_vector_dcm(v, dcm):
rotmat = get_rotmat_dcm(dcm)
return np.dot(v, rotmat.T)
def get_rotmat_dcm(dcm):
return dcm.T
def shift_vector_dcm(v, dcm, forward=False):
shiftmat = get_shiftmat_dcm(dcm, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_dcm(a, dcm, forward=False):
shiftmat = get_shiftmat_dcm(dcm, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_dcm(a, dcm, forward=False):
shiftmat = get_shiftmat_dcm(dcm, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_dcm(dcm, forward=False):
shiftmat = dcm
if not forward:
shiftmat = shiftmat.T
return shiftmat
#Euler angle-----------------------------------------------------------
def factorize_rotmat(rotmat, seq='XYZ', world=True):
return eulang.factor_rotmat(rotmat, seq=seq, world=world)
def euler_to_euler(euler, seq, world, to_seq, to_world):
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
return factorize_rotmat(rotmat, seq=to_seq, world=to_world)
def euler_to_quat(euler, seq='XYZ', world=True):
axis, angle = euler_to_axis_angle(euler, seq=seq, world=world)
return axis_angle_to_quat(axis, angle)
def euler_to_dcm(euler, seq='XYZ', world=True):
dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True)
return dcm
def euler_to_axis_angle(euler, seq='XYZ', world=True):
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
axis, angle = extract_axis_angle_from_rotmat(rotmat)
return (axis, angle)
def any_to_euler(orientation, to_seq, to_world):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
euler = quat_to_euler(quat, seq=to_seq, world=to_world)
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
euler = euler_to_euler(euler, seq, world, to_seq, to_world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world)
elif ori_repr == 'dcm':
euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world)
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return euler
def rotate_vector_euler(v, euler, seq='XYZ', world=True):
'''
Rotates vectors by the rotation described by the given Euler angles.
'''
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
return np.dot(v, rotmat.T)
def get_rotmat_euler(euler, seq='XYZ', world=True):
return eulang.rotmat_euler(euler, seq=seq, world=world)
def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False):
shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_euler(a, euler, forward=False):
shiftmat = get_shiftmat_euler(euler, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_euler(a, euler, forward=False):
shiftmat = get_shiftmat_euler(euler, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False):
rotmat = get_rotmat_euler(euler, seq=seq, world=world)
if forward:
shiftmat = rotmat.T
else:
shiftmat = rotmat
return shiftmat
#Quaternion-----------------------------------------------------------
def get_rand_quat():
q = np.random.random((4,))
return normalize_quat(q)
def get_identity_quat():
return np.array([1.0, 0.0, 0.0, 0.0])
def get_rand_quat():
axis, angle = get_rand_axis_angle()
return axis_angle_to_quat(axis, angle)
def get_perturbed_quat(q):
raise NotImplementedError
def quat_to_axis_angle(q):
angle = 2*math.acos(q[0])
sin = math.sqrt(1.0-q[0]**2)
if angle > 0.0:
if angle < math.pi:
axis = q[1:4]/sin
else:
rotmat = get_rotmat_quat(q)
axis, angle = extract_axis_angle_from_rotmat(rotmat)
else:
axis = np.array([1.0, 0.0, 0.0])
return fix_axis_angle(axis, angle, normalize=True)
def quat_to_euler(q, seq='XYZ', world=True):
rotmat = get_rotmat_quat(q)
return factorize_rotmat(rotmat, seq=seq, world=world)
def quat_to_dcm(q):
return get_shiftmat_quat(q, forward=True)
def any_to_quat(orientation):
ori_repr = orientation['repr']
if ori_repr == 'quat':
quat = np.array(orientation['quat'])
elif ori_repr == 'euler':
euler = np.array(orientation['euler'])
seq = orientation['seq']
world = orientation['world']
quat = euler_to_quat(euler, seq=seq, world=world)
elif ori_repr == 'axis_angle':
axis = np.array(orientation['axis'])
angle = orientation['angle']
quat = axis_angle_to_quat(axis, angle)
elif ori_repr == 'dcm':
quat = dcm_to_quat(orientation['dcm'])
else:
raise ValueError(
'Unrecognized orientation repr {0}'.format(ori_repr))
return quat
def rotate_vector_quat(v, q):
rotmat = get_rotmat_quat(q)
return np.dot(v, rotmat.T)
def get_rotmat_quat(q):
rotmat = np.empty((3,3))
q0sq = q[0]**2
q1sq = q[1]**2
q2sq = q[2]**2
q3sq = q[3]**2
q0q1 = q[0]*q[1]
q0q2 = q[0]*q[2]
q0q3 = q[0]*q[3]
q1q2 = q[1]*q[2]
q1q3 = q[1]*q[3]
q2q3 = q[2]*q[3]
rotmat[0,0] = 2*(q0sq + q1sq) - 1.0
rotmat[0,1] = 2*(q1q2 - q0q3)
rotmat[0,2] = 2*(q1q3 + q0q2)
rotmat[1,0] = 2*(q1q2 + q0q3)
rotmat[1,1] = 2*(q0sq + q2sq) - 1.0
rotmat[1,2] = 2*(q2q3 - q0q1)
rotmat[2,0] = 2*(q1q3 - q0q2)
rotmat[2,1] = 2*(q2q3 + q0q1)
rotmat[2,2] = 2*(q0sq + q3sq) - 1.0
return rotmat
def shift_vector_quat(v, q, forward=False):
shiftmat = get_shiftmat_quat(q, forward=forward)
return np.dot(v, shiftmat.T)
def shift_tensor2_quat(a, quat, forward=False):
shiftmat = get_shiftmat_quat(quat, forward=forward)
return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)
def shift_tensor3_quat(a, quat, forward=False):
shiftmat = get_shiftmat_quat(quat, forward=forward)
return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)
def get_shiftmat_quat(q, forward=False):
if forward:
shiftmat = get_rotmat_quat(get_conjugated_quat(q))
else:
shiftmat = get_rotmat_quat(q)
return shiftmat
def conjugate_quat(q):
'''
Conjugates a quaternion in-place.
'''
q[1:4] = -q[1:4]
return q
def get_conjugated_quat(q):
'''
Conjugates a quaternion and returns a copy.
'''
p = np.copy(q)
p[1:4] = -p[1:4]
return p
def invert_quat(q):
'''
Inverts a quaternion in-place.
'''
return conjugate_quat(q)
def get_inverted_quat(q):
'''
Inverts a quaternion and returns it as a new instance.
'''
p = np.copy(q)
return conjugate_quat(p)
def normalize_quat(q):
'''
Normalizes a quaternion in-place.
'''
q /= np.linalg.norm(q)
return q
def get_normalized_quat(q):
'''
Normalizes a quaternion and returns it as a copy.
'''
p = np.copy(q)
return normalize_quat(p)
def quat_is_normalized(q):
norm = np.linalg.norm(q)
if math.isclose(norm, 1.0, rel_tol=1e-14):
return True
else:
return False
def get_quat_prod(p, q):
p0, p1, p2, p3 = tuple(p)
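#prod_mat below is the left-multiplication matrix of p, so np.dot(prod_mat, q)
#computes the Hamilton product p * q.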
prod_mat = np.array([[p0, -p1, -p2, -p3],
[p1, p0, -p3, p2],
[p2, p3, p0, -p1],
[p3, -p2, p1, p0]])
pq = normalize_quat(np.dot(prod_mat, q))
return pq
def interpolate_quat(q1, q2, t):
theta = get_angle_between_quat(q1, q2)
q = (q1*math.sin((1.0-t)*theta)
+ q2*math.sin(t*theta))/math.sin(theta)
return normalize_quat(q)
def get_angle_between_quat(p, q):
'''
Returns the angle between two quaternions p and q.
'''
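#Assumes p and q are unit quaternions; otherwise the dot product should be
#divided by the product of their norms before taking acos.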
return math.acos(np.dot(p,q))
def quat_deriv_to_ang_vel(q, qdot):
mat = quat_deriv_to_ang_vel_mat(q)
return np.dot(mat, qdot)
def quat_deriv_to_ang_vel_mat(q):
q0, q1, q2, q3 = tuple(q)
return 2*np.array([[-q1, q0, -q3, q2],
[-q2, q3, q0, -q1],
[-q3, -q2, q1, q0]])
def ang_vel_to_quat_deriv(q, ang_vel):
mat = ang_vel_to_quat_deriv_mat(q)
qdot = np.dot(mat, ang_vel)
return qdot
def ang_vel_to_quat_deriv_mat(q):
q0, q1, q2, q3 = tuple(q)
return 0.5*np.array([[-q1, -q2, -q3],
[ q0, q3, -q2],
[-q3, q0, q1],
[ q2, -q1, q0]])
#Other functions------------------------------------------------------
def translate(v, delta):
'''
Translates vectors in-place by delta.
'''
n = v.shape[0]
for i in range(n):
v[i,:] += delta
return v
def align(v, old, new):
'''
old and new represent coordinate axes. They must be unit vectors.
'''
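#With a single vector pair the rotation is built from an axis-angle, with two
#pairs the third axis is completed via cross products, and with three the
#frames are related directly through a direction cosine matrix.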
assert old.shape[0] == new.shape[0]
n = old.shape[0]
if n == 1:
angle = math.acos(np.dot(old, new))
axis = la.unitized(np.cross(old, new))
return rotate_vector_axis_angle(v, axis, angle)
elif n == 2:
z_old = la.unitized(np.cross(old[0,:], old[1,:]))
z_new = la.unitized(np.cross(new[0,:], new[1,:]))
axes_old = np.vstack((old, z_old))
axes_new = np.vstack((new, z_new))
dcm = dcm_from_axes(axes_old, axes_new)
return rotate_vector_dcm(v, dcm)
elif n == 3:
dcm = dcm_from_axes(old, new)
return rotate_vector_dcm(v, dcm)
def mat_is_dcm(mat):
return mat_is_rotmat(mat)
def mat_is_rotmat(mat):
det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12)
is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3))
return is_orthogonal and det_is_one
|
[
"numpy.trace",
"numpy.empty",
"numpy.einsum",
"numpy.sin",
"numpy.linalg.norm",
"numpy.diag",
"math.fmod",
"numpy.copy",
"numpy.identity",
"math.cos",
"numpy.linalg.det",
"math.sqrt",
"numpy.cross",
"math.sin",
"numpy.cos",
"numpy.dot",
"numpy.vstack",
"numpy.zeros",
"math.acos",
"numpy.random.random",
"numpy.array",
"math.isclose",
"numpy.sqrt"
] |
[((784, 813), 'math.fmod', 'math.fmod', (['angle', '(2 * math.pi)'], {}), '(angle, 2 * math.pi)\n', (793, 813), False, 'import math\n'), ((1187, 1201), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (1195, 1201), True, 'import numpy as np\n'), ((1561, 1582), 'numpy.sqrt', 'np.sqrt', (['(1.0 - zetasq)'], {}), '(1.0 - zetasq)\n', (1568, 1582), True, 'import numpy as np\n'), ((1752, 1771), 'math.cos', 'math.cos', (['(angle / 2)'], {}), '(angle / 2)\n', (1760, 1771), False, 'import math\n'), ((1809, 1840), 'numpy.array', 'np.array', (['[w, v[0], v[1], v[2]]'], {}), '([w, v[0], v[1], v[2]])\n', (1817, 1840), True, 'import numpy as np\n'), ((3076, 3095), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (3082, 3095), True, 'import numpy as np\n'), ((3146, 3162), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3154, 3162), True, 'import numpy as np\n'), ((3172, 3185), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3178, 3185), True, 'import numpy as np\n'), ((3196, 3209), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3202, 3209), True, 'import numpy as np\n'), ((3710, 3726), 'numpy.trace', 'np.trace', (['rotmat'], {}), '(rotmat)\n', (3718, 3726), True, 'import numpy as np\n'), ((3739, 3765), 'math.acos', 'math.acos', (['((trace - 1) / 2)'], {}), '((trace - 1) / 2)\n', (3748, 3765), False, 'import math\n'), ((4924, 4945), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (4930, 4945), True, 'import numpy as np\n'), ((5089, 5133), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (5098, 5133), True, 'import numpy as np\n'), ((5277, 5335), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (5286, 5335), True, 'import numpy as np\n'), ((6047, 6061), 'numpy.dot', 'np.dot', (['B', 'A.T'], {}), '(B, A.T)\n', (6053, 6061), True, 'import numpy as np\n'), ((7306, 7325), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (7312, 7325), True, 'import numpy as np\n'), ((7482, 7503), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (7488, 7503), True, 'import numpy as np\n'), ((7617, 7661), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (7626, 7661), True, 'import numpy as np\n'), ((7775, 7833), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (7784, 7833), True, 'import numpy as np\n'), ((9877, 9896), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (9883, 9896), True, 'import numpy as np\n'), ((10176, 10197), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (10182, 10197), True, 'import numpy as np\n'), ((10319, 10363), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (10328, 10363), True, 'import numpy as np\n'), ((10485, 10543), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (10494, 10543), True, 'import numpy as np\n'), ((10877, 10899), 'numpy.random.random', 'np.random.random', (['(4,)'], {}), '((4,))\n', (10893, 10899), True, 'import numpy as np\n'), ((10967, 10997), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], 
{}), '([1.0, 0.0, 0.0, 0.0])\n', (10975, 10997), True, 'import numpy as np\n'), ((11232, 11258), 'math.sqrt', 'math.sqrt', (['(1.0 - q[0] ** 2)'], {}), '(1.0 - q[0] ** 2)\n', (11241, 11258), False, 'import math\n'), ((12539, 12558), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (12545, 12558), True, 'import numpy as np\n'), ((12598, 12614), 'numpy.empty', 'np.empty', (['(3, 3)'], {}), '((3, 3))\n', (12606, 12614), True, 'import numpy as np\n'), ((13270, 13291), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (13276, 13291), True, 'import numpy as np\n'), ((13409, 13453), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (13418, 13453), True, 'import numpy as np\n'), ((13571, 13629), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (13580, 13629), True, 'import numpy as np\n'), ((14032, 14042), 'numpy.copy', 'np.copy', (['q'], {}), '(q)\n', (14039, 14042), True, 'import numpy as np\n'), ((14292, 14302), 'numpy.copy', 'np.copy', (['q'], {}), '(q)\n', (14299, 14302), True, 'import numpy as np\n'), ((14421, 14438), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (14435, 14438), True, 'import numpy as np\n'), ((14561, 14571), 'numpy.copy', 'np.copy', (['q'], {}), '(q)\n', (14568, 14571), True, 'import numpy as np\n'), ((14641, 14658), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (14655, 14658), True, 'import numpy as np\n'), ((14666, 14704), 'math.isclose', 'math.isclose', (['norm', '(1.0)'], {'rel_tol': '(1e-14)'}), '(norm, 1.0, rel_tol=1e-14)\n', (14678, 14704), False, 'import math\n'), ((14829, 14922), 'numpy.array', 'np.array', (['[[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]'], {}), '([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -\n p2, p1, p0]])\n', (14837, 14922), True, 'import numpy as np\n'), ((15483, 15500), 'numpy.dot', 'np.dot', (['mat', 'qdot'], {}), '(mat, qdot)\n', (15489, 15500), True, 'import numpy as np\n'), ((15794, 15814), 'numpy.dot', 'np.dot', (['mat', 'ang_vel'], {}), '(mat, ang_vel)\n', (15800, 15814), True, 'import numpy as np\n'), ((656, 676), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (670, 676), True, 'import numpy as np\n'), ((1288, 1306), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1304, 1306), True, 'import numpy as np\n'), ((1778, 1797), 'math.sin', 'math.sin', (['(angle / 2)'], {}), '(angle / 2)\n', (1786, 1797), False, 'import math\n'), ((2286, 2315), 'numpy.array', 'np.array', (["orientation['quat']"], {}), "(orientation['quat'])\n", (2294, 2315), True, 'import numpy as np\n'), ((4735, 4757), 'numpy.array', 'np.array', (['[u0, u1, u2]'], {}), '([u0, u1, u2])\n', (4743, 4757), True, 'import numpy as np\n'), ((6611, 6640), 'numpy.array', 'np.array', (["orientation['quat']"], {}), "(orientation['quat'])\n", (6619, 6640), True, 'import numpy as np\n'), ((8962, 8991), 'numpy.array', 'np.array', (["orientation['quat']"], {}), "(orientation['quat'])\n", (8970, 8991), True, 'import numpy as np\n'), ((11206, 11221), 'math.acos', 'math.acos', (['q[0]'], {}), '(q[0])\n', (11215, 11221), False, 'import math\n'), ((11477, 11502), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (11485, 11502), True, 'import numpy as np\n'), ((11872, 11901), 'numpy.array', 'np.array', (["orientation['quat']"], {}), 
"(orientation['quat'])\n", (11880, 11901), True, 'import numpy as np\n'), ((15023, 15042), 'numpy.dot', 'np.dot', (['prod_mat', 'q'], {}), '(prod_mat, q)\n', (15029, 15042), True, 'import numpy as np\n'), ((15208, 15223), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (15216, 15223), False, 'import math\n'), ((15382, 15394), 'numpy.dot', 'np.dot', (['p', 'q'], {}), '(p, q)\n', (15388, 15394), True, 'import numpy as np\n'), ((15580, 15650), 'numpy.array', 'np.array', (['[[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]'], {}), '([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]])\n', (15588, 15650), True, 'import numpy as np\n'), ((15912, 15984), 'numpy.array', 'np.array', (['[[-q1, -q2, -q3], [q0, q3, -q2], [-q3, q0, q1], [q2, -q1, q0]]'], {}), '([[-q1, -q2, -q3], [q0, q3, -q2], [-q3, q0, q1], [q2, -q1, q0]])\n', (15920, 15984), True, 'import numpy as np\n'), ((17150, 17168), 'numpy.linalg.det', 'np.linalg.det', (['mat'], {}), '(mat)\n', (17163, 17168), True, 'import numpy as np\n'), ((17237, 17255), 'numpy.dot', 'np.dot', (['mat', 'mat.T'], {}), '(mat, mat.T)\n', (17243, 17255), True, 'import numpy as np\n'), ((17257, 17271), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (17268, 17271), True, 'import numpy as np\n'), ((692, 745), 'math.isclose', 'math.isclose', (['norm', '(1.0)'], {'abs_tol': '(1e-14)', 'rel_tol': '(1e-14)'}), '(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14)\n', (704, 745), False, 'import math\n'), ((2409, 2439), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (2417, 2439), True, 'import numpy as np\n'), ((6719, 6749), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (6727, 6749), True, 'import numpy as np\n'), ((9102, 9132), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (9110, 9132), True, 'import numpy as np\n'), ((11948, 11978), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (11956, 11978), True, 'import numpy as np\n'), ((16516, 16532), 'numpy.dot', 'np.dot', (['old', 'new'], {}), '(old, new)\n', (16522, 16532), True, 'import numpy as np\n'), ((16561, 16579), 'numpy.cross', 'np.cross', (['old', 'new'], {}), '(old, new)\n', (16569, 16579), True, 'import numpy as np\n'), ((16789, 16812), 'numpy.vstack', 'np.vstack', (['(old, z_old)'], {}), '((old, z_old))\n', (16798, 16812), True, 'import numpy as np\n'), ((16832, 16855), 'numpy.vstack', 'np.vstack', (['(new, z_new)'], {}), '((new, z_new))\n', (16841, 16855), True, 'import numpy as np\n'), ((1401, 1419), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1417, 1419), True, 'import numpy as np\n'), ((1446, 1464), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1462, 1464), True, 'import numpy as np\n'), ((2631, 2660), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (2639, 2660), True, 'import numpy as np\n'), ((4039, 4054), 'numpy.diag', 'np.diag', (['rotmat'], {}), '(rotmat)\n', (4046, 4054), True, 'import numpy as np\n'), ((6926, 6955), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (6934, 6955), True, 'import numpy as np\n'), ((9321, 9350), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (9329, 9350), True, 'import numpy as np\n'), ((12157, 12186), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (12165, 12186), True, 'import numpy as np\n'), ((15148, 15175), 
'math.sin', 'math.sin', (['((1.0 - t) * theta)'], {}), '((1.0 - t) * theta)\n', (15156, 15175), False, 'import math\n'), ((15189, 15208), 'math.sin', 'math.sin', (['(t * theta)'], {}), '(t * theta)\n', (15197, 15208), False, 'import math\n'), ((16682, 16712), 'numpy.cross', 'np.cross', (['old[0, :]', 'old[1, :]'], {}), '(old[0, :], old[1, :])\n', (16690, 16712), True, 'import numpy as np\n'), ((16740, 16770), 'numpy.cross', 'np.cross', (['new[0, :]', 'new[1, :]'], {}), '(new[0, :], new[1, :])\n', (16748, 16770), True, 'import numpy as np\n'), ((4100, 4157), 'math.sqrt', 'math.sqrt', (['(rotmat[0, 0] - rotmat[1, 1] - rotmat[2, 2] + 1)'], {}), '(rotmat[0, 0] - rotmat[1, 1] - rotmat[2, 2] + 1)\n', (4109, 4157), False, 'import math\n'), ((4298, 4355), 'math.sqrt', 'math.sqrt', (['(rotmat[1, 1] - rotmat[0, 0] - rotmat[2, 2] + 1)'], {}), '(rotmat[1, 1] - rotmat[0, 0] - rotmat[2, 2] + 1)\n', (4307, 4355), False, 'import math\n'), ((4496, 4553), 'math.sqrt', 'math.sqrt', (['(rotmat[2, 2] - rotmat[0, 0] - rotmat[1, 1] + 1)'], {}), '(rotmat[2, 2] - rotmat[0, 0] - rotmat[1, 1] + 1)\n', (4505, 4553), False, 'import math\n')]
|
#! /usr/bin/python3
import argparse
import hashlib
import mmap
import os
import posixpath
import stat
import sys
from collections import namedtuple
exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins')
joinpath = posixpath.join
FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum'])
def get_file_desc(full_path, want_checksum):
try:
stinf = os.stat(full_path)
except:
return None
if stat.S_ISREG(stinf.st_mode):
item_type, item_size = 'F', stinf.st_size
elif stat.S_ISDIR(stinf.st_mode):
item_type, item_size = 'D', 0
else:
return None
checksum = 0
if item_type == 'F' and item_size > 0 and want_checksum(full_path):
with open(full_path, 'rb') as fl:
mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ)
checksum = hashlib.md5(mm).hexdigest().lower()
mm.close()
return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum)
def get_file_stats(base_dir, exclusions, get_checksums=None):
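# get_checksums semantics: None -> never checksum; an empty list -> checksum
# every regular file; a non-empty list -> checksum only the listed paths.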
if get_checksums is None:
want_checksum = lambda filename: False
elif not get_checksums:
want_checksum = lambda filename: True
else:
want_checksum = lambda filename: filename in get_checksums
for base, dirs, files in os.walk(base_dir):
if base == base_dir:
# Skip top-level cruft
dirs[:] = [d for d in dirs if d not in exclusions]
for filename in files:
fd = get_file_desc(posixpath.join(base, filename), want_checksum)
if fd:
yield fd
def ls_cmd(args):
if args.checksum:
get_checksums = []
else:
get_checksums = args.filepath or None
for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums):
print("{},{},{},{},{}".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path))
class CompareError(Exception):
def __init__(self, action, reason):
super().__init__("Compare failed")
self.action = action
self.reason = reason
def cmp_cmd(args):
do_checksum, dont_checksum = lambda fn: True, lambda fn: False
with open(args.csv, "r") as fl:
for line in (l.strip() for l in fl):
if line:
item_type, size, mtime, checksum, full_path = line.split(',', 4)
remote_fd = FileDesc(full_path, item_type, int(size), float(mtime), checksum)
local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path))
want_checksum = do_checksum if remote_fd.checksum not in ('', '0') else dont_checksum
local_fd = get_file_desc(local_path, want_checksum)
try:
if not local_fd:
raise CompareError("download", "missing")
if local_fd.item_type != remote_fd.item_type:
if remote_fd.item_type == 'D':
raise CompareError("mkdir", "changed")
elif remote_fd.item_type == 'F':
raise CompareError("download", "changed")
if remote_fd.size != local_fd.size:
raise CompareError("download", "size")
if remote_fd.checksum not in ('', '0') and remote_fd.checksum != local_fd.checksum:
raise CompareError("download", "checksum")
if remote_fd.mtime != local_fd.mtime:
os.utime(local_fd.full_path, (remote_fd.mtime, remote_fd.mtime))
raise CompareError("#touched", "mtime")
except CompareError as e:
print("%s,%s,%s,%s" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path))
if __name__ == "__main__":
argp = argparse.ArgumentParser("hasher")
argp.add_argument("--base-dir", "-C", dest="base_dir", default="/svn", help="Base folder")
subp = argp.add_subparsers()
lscmd = subp.add_parser("ls")
lscmd.add_argument("--checksum", action="store_true", help="Force checksumming")
lscmd.add_argument("filepath", action="append", default=[], nargs='*', help="File paths to check")
lscmd.set_defaults(func=ls_cmd)
cmpcmd = subp.add_parser("cmp")
lscmd.add_argument("csv", type=str, help="CSV file to check against")
args = argp.parse_args(sys.argv[1:])
if not hasattr(args, 'func'):
raise RuntimeError("No sub-command specified. See --help for assistance.")
args.func(args)
|
[
"hashlib.md5",
"stat.S_ISREG",
"os.stat",
"argparse.ArgumentParser",
"os.walk",
"posixpath.join",
"collections.namedtuple",
"os.utime",
"stat.S_ISDIR",
"os.path.join"
] |
[((272, 351), 'collections.namedtuple', 'namedtuple', (['"""FileDesc"""', "['full_path', 'item_type', 'size', 'mtime', 'checksum']"], {}), "('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum'])\n", (282, 351), False, 'from collections import namedtuple\n'), ((481, 508), 'stat.S_ISREG', 'stat.S_ISREG', (['stinf.st_mode'], {}), '(stinf.st_mode)\n', (493, 508), False, 'import stat\n'), ((1351, 1368), 'os.walk', 'os.walk', (['base_dir'], {}), '(base_dir)\n', (1358, 1368), False, 'import os\n'), ((3752, 3785), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""hasher"""'], {}), "('hasher')\n", (3775, 3785), False, 'import argparse\n'), ((423, 441), 'os.stat', 'os.stat', (['full_path'], {}), '(full_path)\n', (430, 441), False, 'import os\n'), ((569, 596), 'stat.S_ISDIR', 'stat.S_ISDIR', (['stinf.st_mode'], {}), '(stinf.st_mode)\n', (581, 596), False, 'import stat\n'), ((1562, 1592), 'posixpath.join', 'posixpath.join', (['base', 'filename'], {}), '(base, filename)\n', (1576, 1592), False, 'import posixpath\n'), ((2464, 2512), 'os.path.join', 'os.path.join', (['args.base_dir', 'remote_fd.full_path'], {}), '(args.base_dir, remote_fd.full_path)\n', (2476, 2512), False, 'import os\n'), ((3443, 3497), 'os.utime', 'os.utime', (['local_fd', '(remote_fd.mtime, remote_fd.mtime)'], {}), '(local_fd, (remote_fd.mtime, remote_fd.mtime))\n', (3451, 3497), False, 'import os\n'), ((889, 904), 'hashlib.md5', 'hashlib.md5', (['mm'], {}), '(mm)\n', (900, 904), False, 'import hashlib\n')]
|
import sklearn.datasets as datasets
from numpywren.matrix import BigMatrix
from numpywren import matrix_utils, binops
from numpywren.matrix_init import shard_matrix
import pytest
import numpy as np
import pywren
import unittest
class IndexingTestClass(unittest.TestCase):
def test_single_shard_index_get(self):
X = np.random.randn(128, 128)
X_sharded = BigMatrix("test_0", shape=X.shape, shard_sizes=X.shape)
shard_matrix(X_sharded, X)
X_sharded_local = X_sharded.submatrix(0, 0).get_block()
assert(np.all(X_sharded_local == X))
def test_single_shard_index_put(self):
X = np.random.randn(128, 128)
X_sharded = BigMatrix("test_1", shape=X.shape, shard_sizes=X.shape)
X_sharded.submatrix(0, 0).put_block(X)
assert(np.all(X_sharded.numpy() == X))
def test_multiple_shard_index_get(self):
X = np.random.randn(128, 128)
shard_sizes = [64, 64]
X_sharded = BigMatrix("test_2", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0)))
assert(np.all(X[64:128, 64:128] ==
X_sharded.submatrix(1, 1).get_block()))
assert(np.all(X[0:64, 64:128] ==
X_sharded.submatrix(0, 1).get_block()))
assert(np.all(X[64:128, 0:64] ==
X_sharded.submatrix(None, 0).get_block(1)))
def test_simple_slices(self):
X = np.random.randn(128, 128)
shard_sizes = [32, 32]
X_sharded = BigMatrix("test_3", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy()))
assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy()))
assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy()))
assert(np.all(X[:, 96:128] == X_sharded.submatrix(
None, [3, None]).numpy()))
def test_step_slices(self):
X = np.random.randn(128, 128)
shard_sizes = [16, 16]
X_sharded = BigMatrix("test_4", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[::32] == X_sharded.submatrix(
[None, None, 2]).numpy()[::16]))
assert(np.all(X[16::32] == X_sharded.submatrix(
[1, None, 2]).numpy()[::16]))
assert(np.all(X[:, 0:96:64] == X_sharded.submatrix(
None, [0, 6, 4]).numpy()[:, ::16]))
assert(np.all(X[:, 96:128:64] == X_sharded.submatrix(
None, [6, 8, 4]).numpy()[:, ::16]))
def test_complex_slices(self):
X = np.random.randn(21, 67, 53)
shard_sizes = [21, 16, 11]
X_sharded = BigMatrix("test_5", shape=X.shape, shard_sizes=shard_sizes)
shard_matrix(X_sharded, X)
assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy()))
assert(np.all(X[:, 64:67, 44:53] ==
X_sharded.submatrix(0, 4, 4).numpy()))
|
[
"numpywren.matrix_init.shard_matrix",
"numpywren.matrix.BigMatrix",
"numpy.all",
"numpy.random.randn"
] |
[((329, 354), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (344, 354), True, 'import numpy as np\n'), ((375, 430), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_0"""'], {'shape': 'X.shape', 'shard_sizes': 'X.shape'}), "('test_0', shape=X.shape, shard_sizes=X.shape)\n", (384, 430), False, 'from numpywren.matrix import BigMatrix\n'), ((439, 465), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (451, 465), False, 'from numpywren.matrix_init import shard_matrix\n'), ((545, 573), 'numpy.all', 'np.all', (['(X_sharded_local == X)'], {}), '(X_sharded_local == X)\n', (551, 573), True, 'import numpy as np\n'), ((631, 656), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (646, 656), True, 'import numpy as np\n'), ((677, 732), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_1"""'], {'shape': 'X.shape', 'shard_sizes': 'X.shape'}), "('test_1', shape=X.shape, shard_sizes=X.shape)\n", (686, 732), False, 'from numpywren.matrix import BigMatrix\n'), ((885, 910), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (900, 910), True, 'import numpy as np\n'), ((962, 1021), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_2"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_2', shape=X.shape, shard_sizes=shard_sizes)\n", (971, 1021), False, 'from numpywren.matrix import BigMatrix\n'), ((1030, 1056), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (1042, 1056), False, 'from numpywren.matrix_init import shard_matrix\n'), ((1496, 1521), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (1511, 1521), True, 'import numpy as np\n'), ((1573, 1632), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_3"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_3', shape=X.shape, shard_sizes=shard_sizes)\n", (1582, 1632), False, 'from numpywren.matrix import BigMatrix\n'), ((1641, 1667), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (1653, 1667), False, 'from numpywren.matrix_init import shard_matrix\n'), ((2035, 2060), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (2050, 2060), True, 'import numpy as np\n'), ((2112, 2171), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_4"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_4', shape=X.shape, shard_sizes=shard_sizes)\n", (2121, 2171), False, 'from numpywren.matrix import BigMatrix\n'), ((2180, 2206), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (2192, 2206), False, 'from numpywren.matrix_init import shard_matrix\n'), ((2670, 2697), 'numpy.random.randn', 'np.random.randn', (['(21)', '(67)', '(53)'], {}), '(21, 67, 53)\n', (2685, 2697), True, 'import numpy as np\n'), ((2753, 2812), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_5"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_5', shape=X.shape, shard_sizes=shard_sizes)\n", (2762, 2812), False, 'from numpywren.matrix import BigMatrix\n'), ((2821, 2847), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (2833, 2847), False, 'from numpywren.matrix_init import shard_matrix\n')]
|
# Generated by Django 2.2.10 on 2020-04-10 22:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0023_game_max_words'),
]
operations = [
migrations.RenameField(
model_name='game',
old_name='max_words',
new_name='words_per_player',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((221, 318), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""game"""', 'old_name': '"""max_words"""', 'new_name': '"""words_per_player"""'}), "(model_name='game', old_name='max_words', new_name=\n 'words_per_player')\n", (243, 318), False, 'from django.db import migrations\n')]
|
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import subprocess
import sys
import tempfile
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log
from catena.common import config
from catena.common.utils import decrypt_private_rsakey
from catena.common.utils import decrypt_rsakey
from catena.db.sqlalchemy import api as db_api
from catena.db.sqlalchemy import models
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def register_models():
context = enginefacade.writer.get_engine()
return models.register_models(context)
def unregister_models():
context = enginefacade.writer.get_engine()
return models.unregister_models(context)
def output_ssh_key():
context = db_api.get_context()
chain = db_api.get_chain(context, CONF.sub.chain_id)
if chain is None:
return LOG.error('This chain-id does not exist')
node = db_api.get_node(context, chain, CONF.sub.node_id)
if node is None:
return LOG.error('This node-id does not exist')
print(decrypt_rsakey(node.ssh_key))
def open_ssh_connection():
context = db_api.get_context()
chain = db_api.get_chain(context, CONF.sub.chain_id)
if chain is None:
return LOG.error('This chain-id does not exist')
node = db_api.get_node(context, chain, CONF.sub.node_id)
if node is None:
return LOG.error('This node-id does not exist')
home = os.path.expanduser("~/.ssh")
jumpbox_ip = chain.get_cloud_config()['jumpbox_ip']
with tempfile.NamedTemporaryFile(
dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile(
dir=home) as temp_jumpbox_ssh:
decrypt_private_rsakey(node.ssh_key, temp_node_ssh)
decrypt_private_rsakey(
chain.get_cloud_config()['jumpbox_key'],
temp_jumpbox_ssh
)
args = [
'/bin/bash', '-c',
'ssh -i {} -o ProxyCommand="ssh -q -i {} -W %h:%p ubuntu@{}" -o '
'StrictHostKeyChecking=no ubuntu@{}'.format(
temp_node_ssh.name,
temp_jumpbox_ssh.name,
jumpbox_ip,
node.ip)
]
process = subprocess.Popen(args)
process.wait()
def register_sub_opts(subparser):
parser = subparser.add_parser('db_sync')
parser.set_defaults(action_fn=register_models)
parser.set_defaults(action='db_sync')
parser = subparser.add_parser('db_remove')
parser.set_defaults(action_fn=unregister_models)
parser.set_defaults(action='db_remove')
parser = subparser.add_parser('ssh_key')
parser.add_argument('chain_id')
parser.add_argument('node_id')
parser.set_defaults(action_fn=output_ssh_key)
parser.set_defaults(action='ssh_key')
parser = subparser.add_parser('ssh')
parser.add_argument('chain_id')
parser.add_argument('node_id')
parser.set_defaults(action_fn=open_ssh_connection)
parser.set_defaults(action='ssh')
SUB_OPTS = [
cfg.SubCommandOpt(
'sub',
dest='sub',
title='Sub Options',
handler=register_sub_opts)
]
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opts(SUB_OPTS)
config.parse_args(sys.argv[1:])
config.setup_logging()
try:
if CONF.sub.action.startswith('db'):
return CONF.sub.action_fn()
if CONF.sub.action.startswith('ssh'):
return CONF.sub.action_fn()
except Exception as e:
sys.exit("ERROR: {0}".format(e))
|
[
"tempfile.NamedTemporaryFile",
"subprocess.Popen",
"oslo_log.log.getLogger",
"oslo_config.cfg.SubCommandOpt",
"catena.common.utils.decrypt_private_rsakey",
"catena.db.sqlalchemy.api.get_chain",
"catena.common.config.parse_args",
"catena.db.sqlalchemy.models.register_models",
"catena.common.utils.decrypt_rsakey",
"catena.db.sqlalchemy.api.get_context",
"oslo_db.sqlalchemy.enginefacade.writer.get_engine",
"catena.common.config.setup_logging",
"os.path.expanduser",
"catena.db.sqlalchemy.models.unregister_models",
"catena.db.sqlalchemy.api.get_node"
] |
[((1050, 1073), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1063, 1073), False, 'from oslo_log import log\n'), ((1113, 1145), 'oslo_db.sqlalchemy.enginefacade.writer.get_engine', 'enginefacade.writer.get_engine', ([], {}), '()\n', (1143, 1145), False, 'from oslo_db.sqlalchemy import enginefacade\n'), ((1157, 1188), 'catena.db.sqlalchemy.models.register_models', 'models.register_models', (['context'], {}), '(context)\n', (1179, 1188), False, 'from catena.db.sqlalchemy import models\n'), ((1230, 1262), 'oslo_db.sqlalchemy.enginefacade.writer.get_engine', 'enginefacade.writer.get_engine', ([], {}), '()\n', (1260, 1262), False, 'from oslo_db.sqlalchemy import enginefacade\n'), ((1274, 1307), 'catena.db.sqlalchemy.models.unregister_models', 'models.unregister_models', (['context'], {}), '(context)\n', (1298, 1307), False, 'from catena.db.sqlalchemy import models\n'), ((1346, 1366), 'catena.db.sqlalchemy.api.get_context', 'db_api.get_context', ([], {}), '()\n', (1364, 1366), True, 'from catena.db.sqlalchemy import api as db_api\n'), ((1379, 1423), 'catena.db.sqlalchemy.api.get_chain', 'db_api.get_chain', (['context', 'CONF.sub.chain_id'], {}), '(context, CONF.sub.chain_id)\n', (1395, 1423), True, 'from catena.db.sqlalchemy import api as db_api\n'), ((1515, 1564), 'catena.db.sqlalchemy.api.get_node', 'db_api.get_node', (['context', 'chain', 'CONF.sub.node_id'], {}), '(context, chain, CONF.sub.node_id)\n', (1530, 1564), True, 'from catena.db.sqlalchemy import api as db_api\n'), ((1725, 1745), 'catena.db.sqlalchemy.api.get_context', 'db_api.get_context', ([], {}), '()\n', (1743, 1745), True, 'from catena.db.sqlalchemy import api as db_api\n'), ((1759, 1803), 'catena.db.sqlalchemy.api.get_chain', 'db_api.get_chain', (['context', 'CONF.sub.chain_id'], {}), '(context, CONF.sub.chain_id)\n', (1775, 1803), True, 'from catena.db.sqlalchemy import api as db_api\n'), ((1895, 1944), 'catena.db.sqlalchemy.api.get_node', 'db_api.get_node', (['context', 'chain', 'CONF.sub.node_id'], {}), '(context, chain, CONF.sub.node_id)\n', (1910, 1944), True, 'from catena.db.sqlalchemy import api as db_api\n'), ((2034, 2062), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.ssh"""'], {}), "('~/.ssh')\n", (2052, 2062), False, 'import os\n'), ((3591, 3680), 'oslo_config.cfg.SubCommandOpt', 'cfg.SubCommandOpt', (['"""sub"""'], {'dest': '"""sub"""', 'title': '"""Sub Options"""', 'handler': 'register_sub_opts'}), "('sub', dest='sub', title='Sub Options', handler=\n register_sub_opts)\n", (3608, 3680), False, 'from oslo_config import cfg\n'), ((3829, 3860), 'catena.common.config.parse_args', 'config.parse_args', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (3846, 3860), False, 'from catena.common import config\n'), ((3865, 3887), 'catena.common.config.setup_logging', 'config.setup_logging', ([], {}), '()\n', (3885, 3887), False, 'from catena.common import config\n'), ((1652, 1680), 'catena.common.utils.decrypt_rsakey', 'decrypt_rsakey', (['node.ssh_key'], {}), '(node.ssh_key)\n', (1666, 1680), False, 'from catena.common.utils import decrypt_rsakey\n'), ((2130, 2167), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'dir': 'home'}), '(dir=home)\n', (2157, 2167), False, 'import tempfile\n'), ((2199, 2236), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'dir': 'home'}), '(dir=home)\n', (2226, 2236), False, 'import tempfile\n'), ((2275, 2326), 'catena.common.utils.decrypt_private_rsakey', 'decrypt_private_rsakey', (['node.ssh_key', 'temp_node_ssh'], {}), 
'(node.ssh_key, temp_node_ssh)\n', (2297, 2326), False, 'from catena.common.utils import decrypt_private_rsakey\n'), ((2792, 2814), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (2808, 2814), False, 'import subprocess\n')]
|
import os
import sys
import discord
from bot import init_cfg, init_bot
from template import handle, Context
DEBUG = True
TOKEN = ""
def init():
if len(sys.argv) <= 1:
sys.exit("start template: python main.py <TOKEN>")
global TOKEN
TOKEN = sys.argv[1]
if __name__ == "__main__":
init()
    # create config, line and emoji dirs
if not os.path.exists("cfg"):
os.mkdir("cfg")
if not os.path.exists("line"):
os.mkdir("line")
if not os.path.exists("emoji"):
os.mkdir("emoji")
while True:
try:
cli = discord.Client()
tts_bot = init_bot()
online = {}
async def helper(msg: discord.Message):
if DEBUG and msg.channel.name != "test":
# in debug mode, only serve messages from test
return
if not DEBUG and msg.channel.name == "test":
# not in debug mode, skip messages from test
return
guild_id = str(msg.guild.id)
if guild_id not in online:
online[guild_id] = init_cfg(guild_id)
await handle(Context(tts_bot, cli, online[guild_id], msg))
@cli.event
async def on_message(msg: discord.Message):
await helper(msg)
@cli.event
async def on_message_edit(before: discord.Message, after: discord.Message):
await helper(after)
cli.run(TOKEN)
except Exception as e:
print(f"ERROR: {e}")
|
[
"bot.init_bot",
"os.mkdir",
"template.Context",
"os.path.exists",
"sys.exit",
"bot.init_cfg",
"discord.Client"
] |
[((185, 235), 'sys.exit', 'sys.exit', (['"""start template: python main.py <TOKEN>"""'], {}), "('start template: python main.py <TOKEN>')\n", (193, 235), False, 'import sys\n'), ((363, 384), 'os.path.exists', 'os.path.exists', (['"""cfg"""'], {}), "('cfg')\n", (377, 384), False, 'import os\n'), ((394, 409), 'os.mkdir', 'os.mkdir', (['"""cfg"""'], {}), "('cfg')\n", (402, 409), False, 'import os\n'), ((421, 443), 'os.path.exists', 'os.path.exists', (['"""line"""'], {}), "('line')\n", (435, 443), False, 'import os\n'), ((453, 469), 'os.mkdir', 'os.mkdir', (['"""line"""'], {}), "('line')\n", (461, 469), False, 'import os\n'), ((481, 504), 'os.path.exists', 'os.path.exists', (['"""emoji"""'], {}), "('emoji')\n", (495, 504), False, 'import os\n'), ((514, 531), 'os.mkdir', 'os.mkdir', (['"""emoji"""'], {}), "('emoji')\n", (522, 531), False, 'import os\n'), ((580, 596), 'discord.Client', 'discord.Client', ([], {}), '()\n', (594, 596), False, 'import discord\n'), ((620, 630), 'bot.init_bot', 'init_bot', ([], {}), '()\n', (628, 630), False, 'from bot import init_cfg, init_bot\n'), ((1142, 1160), 'bot.init_cfg', 'init_cfg', (['guild_id'], {}), '(guild_id)\n', (1150, 1160), False, 'from bot import init_cfg, init_bot\n'), ((1191, 1235), 'template.Context', 'Context', (['tts_bot', 'cli', 'online[guild_id]', 'msg'], {}), '(tts_bot, cli, online[guild_id], msg)\n', (1198, 1235), False, 'from template import handle, Context\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from xy.device import Device
def main(args):
from modules.utils import get_paths_from_n_files as get
pattern = args.pattern
steps = args.steps
stride = args.stride
skip = args.skip
paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4)
with Device(scale=0.99, penup=0.4) as device:
device.do_paths(paths)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--pattern',
type=str,
required=True
)
parser.add_argument(
'--steps',
type=int,
default=100000
)
parser.add_argument(
'--stride',
type=int,
default=1
)
parser.add_argument(
'--skip',
type=int,
default=0
)
args = parser.parse_args()
main(args)
|
[
"modules.utils.get_paths_from_n_files",
"argparse.ArgumentParser",
"xy.device.Device"
] |
[((249, 335), 'modules.utils.get_paths_from_n_files', 'get', (['pattern', 'skip', 'steps', 'stride'], {'spatial_concat': '(True)', 'spatial_concat_eps': '(0.0001)'}), '(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=\n 0.0001)\n', (252, 335), True, 'from modules.utils import get_paths_from_n_files as get\n'), ((464, 489), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (487, 489), False, 'import argparse\n'), ((337, 366), 'xy.device.Device', 'Device', ([], {'scale': '(0.99)', 'penup': '(0.4)'}), '(scale=0.99, penup=0.4)\n', (343, 366), False, 'from xy.device import Device\n')]
|
from functools import wraps
from benchmark import benchmark
def memo(func):
cache = {}
@wraps(func)
def wrap(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return wrap
def fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
@memo
def fib2(n):
if n < 2:
return 1
else:
return fib2(n-1) + fib2(n-2)
def fib3(m,n):
if m[n] == 0:
m[n] = fib3(m, n-1) + fib3(m, n-2)
return m[n]
@benchmark
def test_fib(n):
print(fib(n))
@benchmark
def test_fib2(n):
print(fib2(n))
@benchmark
def test_fib3(n):
m = [0] * (n+1)
m[0], m[1] = 1, 1
print(fib3(m,n))
if __name__=="__main__":
n = 35
test_fib(n)
test_fib2(n)
test_fib3(n)
|
[
"functiontools.wraps"
] |
[((102, 113), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (107, 113), False, 'from functools import wraps\n')]
|
import os
# from resticweb.dictionary.resticweb_constants import Repository as Rep
from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType
from resticweb.tools.local_session import LocalSession
from resticweb.misc.credential_manager import credential_manager
# from .repository import ResticRepository
from .repository_formatted import ResticRepositoryFormatted
from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot
import json
import traceback
from datetime import datetime
from resticweb.dateutil import parser
import logging
logger = logging.getLogger('debugLogger')
# repository_add_to_db is used instead of the following method
# it's located under resticweb.tools.job_callbacks
def add_repository(info):
with LocalSession() as session:
repository = Repository(
name=info['name'],
description=info.get('description'),
repo_id=info.get('repo_id'),
address=info['address'],
parameters=info['parameters'],
data=info.get('data'),
credential_group_id=info.get('credential_group_id'),
repository_type_id=info['repository_type_id'],
concurrent_uses=info.get('concurrent_uses'),
timeout=info.get('timeout')
)
session.add(repository)
session.commit()
return repository.id
def update_repository(info, repo_id, sync_db=False, unsync_db=False):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=repo_id).first()
if repository.name != info['name']:
credential_manager.set_service_id(repository.credential_group_id, info['name'])
repository.name = info['name']
repository.description = info.get('description')
repository.address = info['address']
repository.cache_repo = info['cache_repo']
repository.concurrent_uses = info['concurrent_uses']
repository.timeout = info['timeout']
repository.parameters = json.dumps(info['parameters'])
session.commit()
from resticweb.tools.job_build import JobBuilder
if sync_db:
job_builder = JobBuilder(job_name=f"Sync repo {repository.name}", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full'))
job_builder.run_job()
if unsync_db:
'''
for snapshot in repository.snapshots:
snapshot.snapshot_objects = []
session.commit()
'''
job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id))
job_builder.run_job()
return repo_id
def delete_repositories(ids):
credential_groups = []
with LocalSession() as session:
for id in ids:
repo_to_remove = session.query(Repository).filter_by(id=id).first()
# credential_manager.remove_credentials(repo_to_remove.credential_group_id)
credential_groups.append(repo_to_remove.credential_group_id)
job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all()
for parameter in job_parameters:
parameter.param_value = None
session.delete(repo_to_remove)
session.commit()
for id in credential_groups:
credential_manager.remove_credentials(id)
def get_repository_from_snap_id(snap_id):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first()
repository = session.query(Repository).filter_by(id=snapshot.repository_id).first()
return repository
# gets basic info about the repository from the database. Also grabs the stats
# from the repository itself like the total size and number of files.
# if use_cache is set to False then the repo stats are grabbed from repo itself
# which might take a bit of time
def get_info(id, repository_interface=None, use_cache=False, repo_status=True):
info_dict = {}
misc_data = None
if repo_status:
if not repository_interface:
repository_interface = get_formatted_repository_interface_from_id(id)
repo_status = repository_interface.is_offline()
if not use_cache:
if not repo_status:
misc_data = repository_interface.get_stats()
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first()
if misc_data:
repository.data = json.dumps(misc_data)
session.commit()
else:
try:
misc_data = json.loads(repository.data)
except TypeError:
misc_data = dict(data=repository.data)
misc_data['status'] = repo_status
info_dict = dict(
id=repository.id,
name=repository.name,
description=repository.description,
repo_id=repository.repo_id,
address=repository.address,
repository_data=repository.data,
concurrent_uses=repository.concurrent_uses,
timeout=repository.timeout,
data=misc_data,
cache_repo=repository.cache_repo,
repository_type=repository_type.name
)
return info_dict
# returns a list of snapshots and places them into the database from the
# repository if use_cache is set to False. Returns list of snapshots from
# the database if use_cache is set to True
def get_snapshots(id, use_cache=False):
repository_interface = get_formatted_repository_interface_from_id(id)
snapshots = []
if not use_cache and repository_interface.is_online():
snapshots = repository_interface.get_snapshots()
return snapshots if snapshots else {}
else:
with LocalSession() as session:
snapshots = session.query(Snapshot).filter_by(repository_id=id).all()
return snapshots
def get_snapshot(repo_id, snapshot_id, use_cache=False):
repository_interface = get_formatted_repository_interface_from_id(repo_id)
if not use_cache and repository_interface.is_online():
snapshot = repository_interface.get_snapshots(snapshot_id)[0]
return snapshot if snapshot else {}
else:
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first()
return snapshot
def insert_snapshots(items, repo_id):
with LocalSession() as session:
for item in items:
item['snap_id'] = item.pop('id')
item['snap_short_id'] = item.pop('short_id')
item['snap_time'] = item.pop('time')
if item['snap_time']:
main_time = item['snap_time'][:-7]
extra = item['snap_time'][-6:]
main_time = main_time + extra
# item['snap_time'] = datetime.strptime(main_time, "%Y-%m-%dT%H:%M:%S.%f%z")
item['snap_time'] = parser.parse(main_time)
new_snapshot = Snapshot(
snap_id=item.get('snap_id'),
snap_short_id=item.get('snap_short_id'),
snap_time=item.get('snap_time'),
hostname=item.get('hostname'),
username=item.get('username'),
tree=item.get('tree'),
repository_id=repo_id,
paths=json.dumps(item.get('paths')),
tags=json.dumps(item.get('tags'))
)
session.add(new_snapshot)
session.commit()
def delete_snapshot(repo_id, snapshot_id):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first()
session.delete(snapshot)
session.commit()
def get_snapshot_objects(snap_id, use_cache=False):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first()
repository = session.query(Repository).filter_by(id=snapshot.repository_id).first()
repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id)
if not use_cache and repository_interface.is_online():
# if the repo is online, we can purge the snapshots from db as we will
# just re-add them fresh from the actual repo
object_list = repository_interface.get_snapshot_ls(snap_id)
# if repository.cache_repo:
# sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface)
return object_list
else:
with LocalSession() as session:
snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all()
snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list]
return snapshot_dict_list
def delete_snapshot_objects(snap_id):
pass
def insert_snapshot_objects(items, snap_id):
with LocalSession() as session:
for item in items:
if item.get('mtime'):
try:
item['mtime'] = parser.parse(item['mtime'])
except ValueError:
item['mtime'] = None
item['modified_time'] = item.pop("mtime")
if item.get('atime'):
try:
item['atime'] = parser.parse(item['atime'])
except ValueError:
item['atime'] = None
item['accessed_time'] = item.pop("atime")
if item.get('ctime'):
try:
item['ctime'] = parser.parse(item['ctime'])
except ValueError:
item['ctime'] = None
item['created_time'] = item.pop("ctime")
new_item = SnapshotObject(
name=item.get('name'),
type=item.get('type'),
path=item.get('path'),
uid=item.get('uid'),
gid=item.get('gid'),
size=item.get('size'),
mode=item.get('mode'),
struct_type=item.get('struct_type'),
modified_time=item.get('modified_time'),
accessed_time=item.get('accessed_time'),
created_time=item.get('created_time'),
snapshot_id=snap_id
)
session.add(new_item)
session.commit()
def get_engine_repositories():
repository_list = []
with LocalSession() as session:
repositories = session.query(Repository).filter_by()
for repository in repositories:
repository_list.append((repository.id, repository.name))
return repository_list
def get_snapshot_info(id):
with LocalSession() as session:
snapshot = session.query(Snapshot).filter_by(snap_id=id).first()
if snapshot.paths:
try:
snapshot.paths = json.loads(snapshot.paths)
except ValueError:
pass
if snapshot.tags:
try:
snapshot.tags = json.loads(snapshot.tags)
except ValueError:
pass
return snapshot
def get_repository_status(id):
repository_interface = get_formatted_repository_interface_from_id(id)
status = repository_interface.is_online()
if status is None:
return "Couldn't get status"
else:
if status:
return "Online"
else:
return "Offline"
def get_repository_name(id):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
return repository.name
else:
return None
def get_repository_address(id):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
return repository.address
else:
return None
def get_repository_password(id):
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
return credential_manager.get_credential(repository.credential_group_id, "repo_password")
else:
return None
def get_formatted_repository_interface_from_id(id):
try:
with LocalSession() as session:
repository = session.query(Repository).filter_by(id=id).first()
if repository:
credential_list = credential_manager.get_group_credentials(repository.credential_group_id)
if credential_list:
repo_password = credential_list.pop('repo_password')
respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None, id)
return respository_interface
except Exception as e:
logger.error(e)
logger.error("trace:" + traceback.format_exc())
return None
|
[
"resticweb.misc.credential_manager.credential_manager.get_credential",
"json.loads",
"resticweb.misc.credential_manager.credential_manager.set_service_id",
"resticweb.misc.credential_manager.credential_manager.get_group_credentials",
"json.dumps",
"resticweb.tools.local_session.LocalSession",
"resticweb.dateutil.parser.parse",
"resticweb.misc.credential_manager.credential_manager.remove_credentials",
"traceback.format_exc",
"logging.getLogger"
] |
[((637, 669), 'logging.getLogger', 'logging.getLogger', (['"""debugLogger"""'], {}), "('debugLogger')\n", (654, 669), False, 'import logging\n'), ((820, 834), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (832, 834), False, 'from resticweb.tools.local_session import LocalSession\n'), ((1514, 1528), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (1526, 1528), False, 'from resticweb.tools.local_session import LocalSession\n'), ((2084, 2114), 'json.dumps', 'json.dumps', (["info['parameters']"], {}), "(info['parameters'])\n", (2094, 2114), False, 'import json\n'), ((2883, 2897), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (2895, 2897), False, 'from resticweb.tools.local_session import LocalSession\n'), ((3487, 3528), 'resticweb.misc.credential_manager.credential_manager.remove_credentials', 'credential_manager.remove_credentials', (['id'], {}), '(id)\n', (3524, 3528), False, 'from resticweb.misc.credential_manager import credential_manager\n'), ((3582, 3596), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (3594, 3596), False, 'from resticweb.tools.local_session import LocalSession\n'), ((4500, 4514), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (4512, 4514), False, 'from resticweb.tools.local_session import LocalSession\n'), ((6741, 6755), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (6753, 6755), False, 'from resticweb.tools.local_session import LocalSession\n'), ((7871, 7885), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (7883, 7885), False, 'from resticweb.tools.local_session import LocalSession\n'), ((8130, 8144), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (8142, 8144), False, 'from resticweb.tools.local_session import LocalSession\n'), ((9238, 9252), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (9250, 9252), False, 'from resticweb.tools.local_session import LocalSession\n'), ((10756, 10770), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (10768, 10770), False, 'from resticweb.tools.local_session import LocalSession\n'), ((11018, 11032), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (11030, 11032), False, 'from resticweb.tools.local_session import LocalSession\n'), ((11759, 11773), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (11771, 11773), False, 'from resticweb.tools.local_session import LocalSession\n'), ((11981, 11995), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (11993, 11995), False, 'from resticweb.tools.local_session import LocalSession\n'), ((12206, 12220), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (12218, 12220), False, 'from resticweb.tools.local_session import LocalSession\n'), ((12339, 12425), 'resticweb.misc.credential_manager.credential_manager.get_credential', 'credential_manager.get_credential', (['repository.credential_group_id', '"""repo_password"""'], {}), "(repository.credential_group_id,\n 'repo_password')\n", (12372, 12425), False, 'from resticweb.misc.credential_manager import credential_manager\n'), ((1674, 1753), 'resticweb.misc.credential_manager.credential_manager.set_service_id', 'credential_manager.set_service_id', (['repository.credential_group_id', "info['name']"], {}), 
"(repository.credential_group_id, info['name'])\n", (1707, 1753), False, 'from resticweb.misc.credential_manager import credential_manager\n'), ((4759, 4780), 'json.dumps', 'json.dumps', (['misc_data'], {}), '(misc_data)\n', (4769, 4780), False, 'import json\n'), ((6050, 6064), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (6062, 6064), False, 'from resticweb.tools.local_session import LocalSession\n'), ((6522, 6536), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (6534, 6536), False, 'from resticweb.tools.local_session import LocalSession\n'), ((8869, 8883), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (8881, 8883), False, 'from resticweb.tools.local_session import LocalSession\n'), ((11183, 11209), 'json.loads', 'json.loads', (['snapshot.paths'], {}), '(snapshot.paths)\n', (11193, 11209), False, 'import json\n'), ((11317, 11342), 'json.loads', 'json.loads', (['snapshot.tags'], {}), '(snapshot.tags)\n', (11327, 11342), False, 'import json\n'), ((12527, 12541), 'resticweb.tools.local_session.LocalSession', 'LocalSession', ([], {}), '()\n', (12539, 12541), False, 'from resticweb.tools.local_session import LocalSession\n'), ((4869, 4896), 'json.loads', 'json.loads', (['repository.data'], {}), '(repository.data)\n', (4879, 4896), False, 'import json\n'), ((7253, 7276), 'resticweb.dateutil.parser.parse', 'parser.parse', (['main_time'], {}), '(main_time)\n', (7265, 7276), False, 'from resticweb.dateutil import parser\n'), ((12691, 12763), 'resticweb.misc.credential_manager.credential_manager.get_group_credentials', 'credential_manager.get_group_credentials', (['repository.credential_group_id'], {}), '(repository.credential_group_id)\n', (12731, 12763), False, 'from resticweb.misc.credential_manager import credential_manager\n'), ((9383, 9410), 'resticweb.dateutil.parser.parse', 'parser.parse', (["item['mtime']"], {}), "(item['mtime'])\n", (9395, 9410), False, 'from resticweb.dateutil import parser\n'), ((9636, 9663), 'resticweb.dateutil.parser.parse', 'parser.parse', (["item['atime']"], {}), "(item['atime'])\n", (9648, 9663), False, 'from resticweb.dateutil import parser\n'), ((9889, 9916), 'resticweb.dateutil.parser.parse', 'parser.parse', (["item['ctime']"], {}), "(item['ctime'])\n", (9901, 9916), False, 'from resticweb.dateutil import parser\n'), ((13169, 13191), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13189, 13191), False, 'import traceback\n')]
|
import abc
import torch
from code_slac.network.base import BaseNetwork
from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df
class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta):
def __init__(self,
obs_dim,
seq_len,
obs_dims_used=None,
obs_dims_used_except=None,
):
super(SplitSeqClassifierBase, self).__init__()
self.used_dims = get_obs_dims_used_df(
obs_dim=obs_dim,
obs_dims_used=obs_dims_used,
obs_dims_used_except=obs_dims_used_except,
)
self._seq_len = seq_len
@property
def seq_len(self):
return self._seq_len
def _check_inputs(self, obs_seq, skill):
batch_dim = 0
seq_dim = 1
data_dim = -1
if skill is not None:
assert skill.size(batch_dim) == obs_seq.size(batch_dim)
assert skill.size(data_dim) == self.skill_dim
assert len(skill.shape) == 2
assert len(obs_seq.shape) == 3
def forward(self,
obs_seq,
skill=None
):
batch_dim = 0
seq_dim = 1
data_dim = -1
self._check_inputs(
obs_seq=obs_seq,
skill=skill
)
obs_seq = obs_seq[..., self.used_dims]
if self.training:
return self.train_forwardpass(
obs_seq=obs_seq,
skill=skill,
)
else:
with torch.no_grad():
return self.eval_forwardpass(
obs_seq=obs_seq,
skill=skill,
)
@abc.abstractmethod
def train_forwardpass(
self,
obs_seq,
skill,
):
raise NotImplementedError
@abc.abstractmethod
def eval_forwardpass(
self,
obs_seq,
skill,
**kwargs,
):
raise NotImplementedError
|
[
"latent_with_splitseqs.config.fun.get_obs_dims_used_df.get_obs_dims_used_df",
"torch.no_grad"
] |
[((486, 599), 'latent_with_splitseqs.config.fun.get_obs_dims_used_df.get_obs_dims_used_df', 'get_obs_dims_used_df', ([], {'obs_dim': 'obs_dim', 'obs_dims_used': 'obs_dims_used', 'obs_dims_used_except': 'obs_dims_used_except'}), '(obs_dim=obs_dim, obs_dims_used=obs_dims_used,\n obs_dims_used_except=obs_dims_used_except)\n', (506, 599), False, 'from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df\n'), ((1564, 1579), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1577, 1579), False, 'import torch\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-05-27
"""Step_01_model_comparison.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import cobra
from matplotlib import pyplot as plt
from matplotlib_venn import venn2
import pandas as pd
import My_def
from My_def.model_report import *
if __name__ == '__main__':
os.chdir('../../ComplementaryData/Step_02_DraftModels/')
# %% <load data>
Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json')
Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json')
Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json')
Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json')
Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json')
bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\t')
bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\t')
Lreu_ca_genes = [i.id for i in Lreu_ca.genes]
Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes]
Lreu_ca_reas = [i.id for i in Lreu_ca.reactions]
Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions]
Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites]
Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites]
# %% <fig compare Lreu_ca and Lreu_ca_gp>
    # Lreu_ca_gp has more
figure, axes = plt.subplots(1, 3)
axes[0].set_title("gene")
axes[1].set_title("rea")
axes[2].set_title("met")
fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)],
('Normal','Gram positive' ), ax=axes[0])
# fg1.get_patch_by_id('10').set_color('Aquamarine')
    fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_gp_reas)],
('Normal','Gram positive'), ax=axes[1])
fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)],
('Normal','Gram positive'), ax=axes[2])
plt.show()
Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes]
Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions]
Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites]
Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes]
Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions]
Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites]
Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes]
Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions]
Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites]
# %% <fig compare templated based method models and Lreu_ca_gp>
# just a overview
figure_2, axes = plt.subplots(1, 3)
axes[0].set_title("gene")
axes[1].set_title("rea")
axes[2].set_title("met")
fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes),
set(Lreu_from_iNF517_genes),
set(Lreu_from_iML1515_genes)],
('iBT721', 'iNF517','iML1515'), ax=axes[0])
fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas),
set(Lreu_from_iNF517_reas),
set(Lreu_from_iML1515_reas)],
('iBT721', 'iNF517','iML1515'), ax=axes[1])
fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets),
set(Lreu_from_iNF517_mets),
set(Lreu_from_iML1515_mets)],
('iBT721', 'iNF517','iML1515'), ax=axes[2])
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"cobra.io.load_json_model",
"matplotlib.pyplot.subplots",
"os.chdir"
] |
[((365, 421), 'os.chdir', 'os.chdir', (['"""../../ComplementaryData/Step_02_DraftModels/"""'], {}), "('../../ComplementaryData/Step_02_DraftModels/')\n", (373, 421), False, 'import os\n'), ((457, 505), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""CarveMe/Lreu_ca.json"""'], {}), "('CarveMe/Lreu_ca.json')\n", (481, 505), False, 'import cobra\n'), ((523, 574), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""CarveMe/Lreu_ca_gp.json"""'], {}), "('CarveMe/Lreu_ca_gp.json')\n", (547, 574), False, 'import cobra\n'), ((598, 656), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""Template/Lreu_from_iNF517.json"""'], {}), "('Template/Lreu_from_iNF517.json')\n", (622, 656), False, 'import cobra\n'), ((680, 738), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""Template/Lreu_from_iBT721.json"""'], {}), "('Template/Lreu_from_iBT721.json')\n", (704, 738), False, 'import cobra\n'), ((763, 822), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""Template/Lreu_from_iML1515.json"""'], {}), "('Template/Lreu_from_iML1515.json')\n", (787, 822), False, 'import cobra\n'), ((841, 898), 'pandas.read_csv', 'pd.read_csv', (['"""../bigg_database/bigg_rea_df.csv"""'], {'sep': '"""\t"""'}), "('../bigg_database/bigg_rea_df.csv', sep='\\t')\n", (852, 898), True, 'import pandas as pd\n'), ((917, 974), 'pandas.read_csv', 'pd.read_csv', (['"""../bigg_database/bigg_met_df.csv"""'], {'sep': '"""\t"""'}), "('../bigg_database/bigg_met_df.csv', sep='\\t')\n", (928, 974), True, 'import pandas as pd\n'), ((1403, 1421), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (1415, 1421), True, 'from matplotlib import pyplot as plt\n'), ((1919, 1929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1927, 1929), True, 'from matplotlib import pyplot as plt\n'), ((2685, 2703), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (2697, 2703), True, 'from matplotlib import pyplot as plt\n'), ((3444, 3454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3452, 3454), True, 'from matplotlib import pyplot as plt\n')]
|
from gensim import models
import json
import numpy as np
MODEL_VERSION = "glove-wiki-gigaword-300"
model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION)
def get_word_vec(word_list):
"""
This method will get the vector of the given word
:param word_list: list of a single word string
:return: the vector list of this word
"""
result = {"status_code": "0000"}
if len(word_list) > 1:
result["status_code"] = "0001"
result["result_info"] = "Expect one wordString for getVec"
return result
word = word_list[0]
try:
vec = model.get_vector(word)
result["vec"] = str(np.array(vec).tolist())
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
return result
def get_sim_by_word(word_list):
"""
    This method will return a list of words similar to the given word
    :param word_list: list of a single word string
    :return: the list of similar words for the given word
"""
result = {"status_code": "0000"}
if len(word_list) > 1:
result["status_code"] = "0001"
result["result_info"] = "Expect one wordString for getSim"
return result
word = word_list[0]
try:
sim_words = model.similar_by_word(word)
result["sim_words"] = sim_words
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
return result
def get_similarity_between(word_list):
"""
This method will get the similarity of two given words
:param word_list: list of two words A B for similarity calculation
:return: cosine similarity of the two given words
"""
result = {"status_code": "0000"}
if len(word_list) != 2:
result["status_code"] = "0001"
result["result_info"] = "Expect two wordString for getSimBetween"
return result
try:
word_a = word_list[0]
word_b = word_list[1]
similarity = model.similarity(word_a, word_b)
result["similarity"] = str(similarity)
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
return result
method_dispatcher = {
"getVec": lambda word_list,: get_word_vec(word_list),
"getSim": lambda word_list,: get_sim_by_word(word_list),
"getSimBetween": lambda word_list,: get_similarity_between(word_list)
}
def validate_event(event):
"""
    This function will validate the event sent from API Gateway to Lambda and raise an exception if it is invalid
:param event:
:return:
"""
params = event["multiValueQueryStringParameters"]
if "method" not in params.keys() or "wordString" not in params.keys():
raise Exception('"method" and "wordString" are expected as the Query Params')
# flag = False
method = params.get("method")
if len(method) != 1:
# flag = False
raise Exception('Expect one value for method param')
method = method[0]
if method not in method_dispatcher.keys():
# flag = False
raise Exception('method must be in one of ' + str(list(method_dispatcher.keys())))
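# A minimal example of the query-parameter payload these checks expect (hypothetical
# values; the real event comes from API Gateway and carries additional fields):
#
# {
#     "multiValueQueryStringParameters": {
#         "method": ["getSimBetween"],
#         "wordString": ["king", "queen"]
#     }
# }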
def lambda_handler(event, context):
result = {}
response = {
'statusCode': 200,
'body': ""
}
try:
validate_event(event)
except Exception as e:
result["status_code"] = "0001"
result["result_info"] = str(e)
result["request_info"] = event["multiValueQueryStringParameters"]
result["model_version"] = MODEL_VERSION
response["body"] = json.dumps(result)
return response
params = event["multiValueQueryStringParameters"]
method = params["method"][0]
word_list = params["wordString"]
result = method_dispatcher[method](word_list)
result["request_info"] = event["multiValueQueryStringParameters"]
result["model_version"] = MODEL_VERSION
response["body"] = json.dumps(result)
print(response)
return response
if __name__ == "__main__":
f = open('mock_event.json')
mock_event = json.load(f)
f.close()
print(lambda_handler(mock_event, context=""))
|
[
"json.load",
"numpy.array",
"gensim.models.KeyedVectors.load_word2vec_format",
"json.dumps"
] |
[((109, 164), 'gensim.models.KeyedVectors.load_word2vec_format', 'models.KeyedVectors.load_word2vec_format', (['MODEL_VERSION'], {}), '(MODEL_VERSION)\n', (149, 164), False, 'from gensim import models\n'), ((3927, 3945), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3937, 3945), False, 'import json\n'), ((4064, 4076), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4073, 4076), False, 'import json\n'), ((3572, 3590), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3582, 3590), False, 'import json\n'), ((649, 662), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (657, 662), True, 'import numpy as np\n')]
|
from time import sleep
from re import search, finditer
from copy import copy
# from pprint import pprint
from slackclient import SlackClient
from bot.message import Message
class Bot:
def __init__(self, conf):
# instantiate Slack client
self.slack_client = SlackClient(conf.slack_bot_token)
# starterbot's user ID in Slack: value is assigned
# after the bot starts up
self.starterbot_id = None
# constants
        self.RTM_READ_DELAY = 2  # 2 second delay between reading from RTM
self.MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
self.LINK_URL = conf.link_url
self.MATCH_PATTERN = conf.match_pattern
# list of channel the bot is member of
self.g_member_channel = []
# context: in which thread the link was already provided
self.message_context = {}
def chat(self):
if self.slack_client.rtm_connect(with_team_state=False):
print("Starter Bot connected and running!")
self.get_list_of_channels()
self.bot_loop()
else:
print("Connection failed. Exception traceback printed above.")
def bot_loop(self):
# Read bot's user ID by calling Web API method `auth.test`
self.slack_client.api_call("auth.test")["user_id"]
while True:
bot_message = self.parse_events_in_channel(self.slack_client.rtm_read())
if bot_message.channel:
self.respond_in_thread(bot_message)
sleep(self.RTM_READ_DELAY)
def parse_direct_mention(self, message_text):
"""
Finds a direct mention (a mention that is at the beginning)
in message text and returns the user ID which was mentioned.
If there is no direct mention, returns None
"""
matches = search(self.MENTION_REGEX, message_text)
# the first group contains the username,
# the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
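    # For reference, with a hypothetical Slack user ID the method above behaves as:
    #   parse_direct_mention('<@U024BE7LH> hello there') -> ('U024BE7LH', 'hello there')
    #   parse_direct_mention('no mention here') -> (None, None)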
def get_list_of_channels(self):
""" print the list of available channels """
channels = self.slack_client.api_call(
"channels.list",
exclude_archived=1
)
self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']]
# print("available channels:")
# pprint(channels)
print("I am member of {} channels: {}"
.format(len(self.g_member_channel),
",".join([c['name'] for c in self.g_member_channel])))
def check_if_member(self, channel):
""" checking if the bot is member of a given channel """
return channel in [channel['id'] for channel in self.g_member_channel]
def parse_events_in_channel(self, events):
"""
Selecting events of type message with no subtype
        which are posted in channels where the bot is a member
"""
# print("DEBUG: my channels: {}".format(g_member_channel))
for event in events:
# pprint(event)
            # Parsing only messages in the channels where the bot is a member
if event["type"] != "message" or "subtype" in event or \
not self.check_if_member(event["channel"]):
# print("not for me: type:{}".format(event))
continue
# analyse message to see if we can suggest some links
analysed_message = self.analyse_message(event['text'])
thread_ts = event['ts']
if 'thread_ts' in event.keys():
thread_ts = event['thread_ts']
if not analysed_message:
return Message(None, None, None)
analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts)
if not analysed_message_no_repeat:
return Message(None, None, None)
return Message(event["channel"], thread_ts,
analysed_message_no_repeat, self.LINK_URL)
return Message(None, None, None)
def analyse_message(self, message):
"""
        find matching substrings in the message and
        return the list of unique matched values
"""
pattern = self.MATCH_PATTERN
matchs = []
for i in finditer(pattern, message):
value = i.group(1)
if value not in matchs:
matchs.append(value)
if not len(matchs):
return
return matchs
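    # For illustration only (the real pattern and link URL come from the bot config):
    # with a hypothetical conf.match_pattern of r'\b([A-Z]+-\d+)\b', a message like
    # "please look at FOO-123 and FOO-456" would yield ['FOO-123', 'FOO-456'].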
def dont_repeat_in_thread(self, analysed_messages, thread_ts):
""" Remove message from analysed message if it was already sent in the same
message thread.
"""
# pprint(self.message_context)
no_repeat_messages = copy(analysed_messages)
for message in analysed_messages:
if thread_ts in self.message_context.keys():
if message in self.message_context[thread_ts]:
no_repeat_messages.remove(message)
return no_repeat_messages
def respond_in_thread(self, bot_message):
"""Sends the response back to the channel
        in a thread
"""
# Add message to the message context to avoid
# repeating same message in a thread
if bot_message.thread_ts not in self.message_context.keys():
self.message_context[bot_message.thread_ts] = []
self.message_context[bot_message.thread_ts].extend(bot_message.raw_message)
self.slack_client.api_call(
"chat.postMessage",
channel=bot_message.channel,
thread_ts=bot_message.thread_ts,
text=bot_message.formatted_message
)
|
[
"bot.message.Message",
"re.finditer",
"slackclient.SlackClient",
"copy.copy",
"time.sleep",
"re.search"
] |
[((282, 315), 'slackclient.SlackClient', 'SlackClient', (['conf.slack_bot_token'], {}), '(conf.slack_bot_token)\n', (293, 315), False, 'from slackclient import SlackClient\n'), ((1822, 1862), 're.search', 'search', (['self.MENTION_REGEX', 'message_text'], {}), '(self.MENTION_REGEX, message_text)\n', (1828, 1862), False, 'from re import search, finditer\n'), ((4074, 4099), 'bot.message.Message', 'Message', (['None', 'None', 'None'], {}), '(None, None, None)\n', (4081, 4099), False, 'from bot.message import Message\n'), ((4333, 4359), 're.finditer', 'finditer', (['pattern', 'message'], {}), '(pattern, message)\n', (4341, 4359), False, 'from re import search, finditer\n'), ((4792, 4815), 'copy.copy', 'copy', (['analysed_messages'], {}), '(analysed_messages)\n', (4796, 4815), False, 'from copy import copy\n'), ((1513, 1539), 'time.sleep', 'sleep', (['self.RTM_READ_DELAY'], {}), '(self.RTM_READ_DELAY)\n', (1518, 1539), False, 'from time import sleep\n'), ((3952, 4031), 'bot.message.Message', 'Message', (["event['channel']", 'thread_ts', 'analysed_message_no_repeat', 'self.LINK_URL'], {}), "(event['channel'], thread_ts, analysed_message_no_repeat, self.LINK_URL)\n", (3959, 4031), False, 'from bot.message import Message\n'), ((3714, 3739), 'bot.message.Message', 'Message', (['None', 'None', 'None'], {}), '(None, None, None)\n', (3721, 3739), False, 'from bot.message import Message\n'), ((3907, 3932), 'bot.message.Message', 'Message', (['None', 'None', 'None'], {}), '(None, None, None)\n', (3914, 3932), False, 'from bot.message import Message\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
project_name = "reco-tut-mlh"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# In[2]:
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[34]:
get_ipython().system(u'git status')
# In[35]:
get_ipython().system(u'git add . && git commit -m \'commit\' && git push origin "{branch}"')
# In[7]:
import sys
sys.path.insert(0, './code')
# ---
# # Collaborative Filtering Comparison
#
# In this notebook we compare different recommendation systems starting with the state-of-the-art LightGCN and going back to the winning algorithm for 2009's Netflix Prize competition, SVD++.
#
# The models included, in order, are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model has its own notebook where we go into more depth, especially LightGCN and NGCF, which we implemented from scratch in TensorFlow.
#
# The last cell compares the performance of the different models using ranking metrics:
#
#
# * Precision@k
# * Recall@k
# * Mean Average Precision (MAP)
# * Normalized Discounted Cumulative Gain (NDCG)
#
# where $k=10$
#
#
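#
# The actual evaluation relies on the repo's `metrics` module imported below; the snippet
# in the next cell is only a rough, self-contained sketch of what two of these ranking
# metrics measure, not the implementation used here. `relevant` and `ranked` are
# hypothetical inputs: a user's held-out test items and a model's ranked top-k
# recommendation list.

# In[ ]:


import math

def precision_at_k(relevant, ranked, k=10):
    """Fraction of the top-k recommended items that appear in the relevant set."""
    hits = sum(1 for item in ranked[:k] if item in relevant)
    return hits / k

def ndcg_at_k(relevant, ranked, k=10):
    """Discounted gain of the hits in the top-k list, normalized by the ideal ordering."""
    dcg = sum(1.0 / math.log2(pos + 2)
              for pos, item in enumerate(ranked[:k]) if item in relevant)
    ideal_hits = min(len(relevant), k)
    idcg = sum(1.0 / math.log2(pos + 2) for pos in range(ideal_hits))
    return dcg / idcg if idcg > 0 else 0.0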
# # Imports
# In[4]:
get_ipython().system(u'pip install -q surprise')
# In[8]:
import math
import numpy as np
import os
import pandas as pd
import random
import requests
import scipy.sparse as sp
import surprise
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.python.framework.ops import disable_eager_execution
from tqdm import tqdm
from utils import stratified_split, numpy_stratified_split
import build_features
import metrics
from models import SVAE
from models.GCN import LightGCN, NGCF
# # Prepare data
# In[9]:
fp = os.path.join('./data/bronze', 'u.data')
raw_data = pd.read_csv(fp, sep='\t', names=['userId', 'movieId', 'rating', 'timestamp'])
print(f'Shape: {raw_data.shape}')
raw_data.sample(10, random_state=123)
# In[10]:
# Load movie titles.
fp = os.path.join('./data/bronze', 'u.item')
movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1')
print(f'Shape: {movie_titles.shape}')
movie_titles.sample(10, random_state=123)
# In[15]:
train_size = 0.75
train, test = stratified_split(raw_data, 'userId', train_size)
print(f'Train Shape: {train.shape}')
print(f'Test Shape: {test.shape}')
print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}')
# In[16]:
combined = train.append(test)
n_users = combined['userId'].nunique()
print('Number of users:', n_users)
n_movies = combined['movieId'].nunique()
print('Number of movies:', n_movies)
# In[17]:
# Create DataFrame with reset index of 0-n_movies.
movie_new = combined[['movieId']].drop_duplicates()
movie_new['movieId_new'] = np.arange(len(movie_new))
train_reindex = pd.merge(train, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
train_reindex['userId_new'] = train_reindex['userId'] - 1
train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']]
test_reindex = pd.merge(test, movie_new, on='movieId', how='left')
# Reset index to 0-n_users.
test_reindex['userId_new'] = test_reindex['userId'] - 1
test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']]
# Create dictionaries so we can convert to and from indexes
item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new']))
id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId']))
user2id = dict(zip(train['userId'], train_reindex['userId_new']))
id2user = dict(zip(train_reindex['userId_new'], train['userId']))
# In[18]:
# Create user-item graph (sparse matrix where users are rows and movies are columns.
# 1 if a user reviewed that movie, 0 if they didn't).
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32)
R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1
# Create the adjacency matrix with the user-item graph.
adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32)
# List of lists.
adj_mat.tolil()
R = R.tolil()
# Put together adjacency matrix. Movies and users are nodes/vertices.
# 1 if the movie and user are connected.
adj_mat[:n_users, n_users:] = R
adj_mat[n_users:, :n_users] = R.T
adj_mat
# In[19]:
# Calculate degree matrix D (for every row count the number of nonzero entries)
D_values = np.array(adj_mat.sum(1))
# Square root and inverse.
D_inv_values = np.power(D_values + 1e-9, -0.5).flatten()
D_inv_values[np.isinf(D_inv_values)] = 0.0
# Create sparse matrix with the values of D^(-0.5) are the diagonals.
D_inv_sq_root = sp.diags(D_inv_values)
# Eval (D^-0.5 * A * D^-0.5).
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root)
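# Putting the pieces together: the propagation matrix fed to the GCN models below is
# the symmetrically normalized adjacency matrix
#     norm_adj_mat = D^(-1/2) * A * D^(-1/2)
# where A is the (n_users + n_movies) x (n_users + n_movies) block matrix
# [[0, R], [R^T, 0]] assembled above and D is its diagonal degree matrix; the small
# 1e-9 offset (together with the isinf check) guards against zero-degree nodes.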
# In[20]:
# to COOrdinate format first ((row, column), data)
coo = norm_adj_mat.tocoo().astype(np.float32)
# create an index that will tell SparseTensor where the non-zero points are
indices = np.mat([coo.row, coo.col]).transpose()
# convert to sparse tensor
A_tilde = tf.SparseTensor(indices, coo.data, coo.shape)
A_tilde
# # Train models
# ## Graph Convoultional Networks (GCNs)
# ### Light Graph Convolution Network (LightGCN)
# In[21]:
light_model = LightGCN(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = 3)
# In[22]:
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)
# ### Neural Graph Collaborative Filtering (NGCF)
# In[23]:
ngcf_model = NGCF(A_tilde,
n_users = n_users,
n_items = n_movies,
n_layers = 3
)
ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)
# ### Recommend with LightGCN and NGCF
# In[24]:
# Convert test user ids to the new ids
users = np.array([user2id[x] for x in test['userId'].unique()])
recs = []
for model in [light_model, ngcf_model]:
recommendations = model.recommend(users, k=10)
recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item})
recommendations = recommendations.merge(movie_titles,
how='left',
on='movieId'
)[['userId', 'movieId', 'title', 'prediction']]
# Create column with the predicted movie's rank for each user
top_k = recommendations.copy()
    top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movie recommendations that are also in the test set
recs.append(top_k)
# ## Standard Variational Autoencoder (SVAE)
# In[26]:
# Binarize the data (only keep ratings >= 4)
df_preferred = raw_data[raw_data['rating'] > 3.5]
df_low_rating = raw_data[raw_data['rating'] <= 3.5]
df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5)
df = df.groupby('movieId').filter(lambda x: len(x) >= 1)
# Obtain both usercount and itemcount after filtering
usercount = df[['userId']].groupby('userId', as_index = False).size()
itemcount = df[['movieId']].groupby('movieId', as_index = False).size()
unique_users =sorted(df.userId.unique())
np.random.seed(123)
unique_users = np.random.permutation(unique_users)
HELDOUT_USERS = 200
# Create train/validation/test users
n_users = len(unique_users)
train_users = unique_users[:(n_users - HELDOUT_USERS * 2)]
val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)]
test_users = unique_users[(n_users - HELDOUT_USERS):]
train_set = df.loc[df['userId'].isin(train_users)]
val_set = df.loc[df['userId'].isin(val_users)]
test_set = df.loc[df['userId'].isin(test_users)]
unique_train_items = pd.unique(train_set['movieId'])
val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)]
test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)]
# Instantiate the sparse matrix generation for train, validation and test sets
# use list of unique items from training set for all sets
am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items)
am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items)
am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items)
# Obtain the sparse matrix for train, validation and test sets
train_data, _, _ = am_train.gen_affinity_matrix()
val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix()
test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix()
# Split validation and test data into training and testing parts
val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123)
test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123)
# Binarize train, validation and test data
train_data = np.where(train_data > 3.5, 1.0, 0.0)
val_data = np.where(val_data > 3.5, 1.0, 0.0)
test_data = np.where(test_data > 3.5, 1.0, 0.0)
# Binarize validation data
val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0)
val_data_te_ratings = val_data_te.copy()
val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0)
# Binarize test data: training part
test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0)
# Binarize test data: testing part (save non-binary version in the separate object, will be used for calculating NDCG)
test_data_te_ratings = test_data_te.copy()
test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0)
# retrieve real ratings from initial dataset
test_data_te_ratings = pd.DataFrame(test_data_te_ratings)
val_data_te_ratings = pd.DataFrame(val_data_te_ratings)
for index, i in df_low_rating.iterrows():
    user_old = i['userId']   # old value
    item_old = i['movieId']  # old value
    if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None):
        user_new = test_map_users.get(user_old)  # new value
        item_new = test_map_items.get(item_old)  # new value
        rating = i['rating']
        test_data_te_ratings.at[user_new, item_new] = rating
    if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None):
        user_new = val_map_users.get(user_old)  # new value
        item_new = val_map_items.get(item_old)  # new value
        rating = i['rating']
        val_data_te_ratings.at[user_new, item_new] = rating
val_data_te_ratings = val_data_te_ratings.to_numpy()
test_data_te_ratings = test_data_te_ratings.to_numpy()
# In[27]:
disable_eager_execution()
svae_model = SVAE.StandardVAE(n_users=train_data.shape[0],
original_dim=train_data.shape[1],
intermediate_dim=200,
latent_dim=64,
n_epochs=400,
batch_size=100,
k=10,
verbose=0,
seed=123,
drop_encoder=0.5,
drop_decoder=0.5,
annealing=False,
beta=1.0
)
svae_model.fit(x_train=train_data,
x_valid=val_data,
x_val_tr=val_data_tr,
x_val_te=val_data_te_ratings,
mapper=am_val
)
# ### Recommend with SVAE
# In[28]:
# Model prediction on the training part of test set
top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True)
# Convert sparse matrix back to df
recommendations = am_test.map_back_sparse(top_k, kind='prediction')
test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings
# Create column with the predicted movie's rank for each user
top_k = recommendations.copy()
top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1
# For each user, only include movie recommendations that are also in the test set
recs.append(top_k)
# ## Singular Value Decomposition (SVD)
# ### SVD++
# In[29]:
surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset()
svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True)
svdpp.fit(surprise_train)
# ### SVD
# In[30]:
svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True)
svd.fit(surprise_train)
# ### Recommend with SVD++ and SVD
# In[31]:
for model in [svdpp, svd]:
predictions = []
users = train['userId'].unique()
items = train['movieId'].unique()
for user in users:
for item in items:
predictions.append([user, item, model.predict(user, item).est])
predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction'])
# Remove movies already seen by users
# Create column of all 1s
temp = train[['userId', 'movieId']].copy()
temp['seen'] = 1
# Outer join and remove movies that have alread been seen (seen=1)
merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how="outer")
merged = merged[merged['seen'].isnull()].drop('seen', axis=1)
# Create filter for users that appear in both the train and test set
common_users = set(test['userId']).intersection(set(predictions['userId']))
# Filter the test and predictions so they have the same users between them
test_common = test[test['userId'].isin(common_users)]
svd_pred_common = merged[merged['userId'].isin(common_users)]
if len(set(merged['userId'])) != len(set(test['userId'])):
print('Number of users in train and test are NOT equal')
print(f"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}")
print(f"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}")
continue
# From the predictions, we want only the top k for each user,
# not all the recommendations.
# Extract the top k recommendations from the predictions
top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True)
    top_k = top_movies.copy()
    top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1
    # For each user, only include movie recommendations that are also in the test set
recs.append(top_k)
# # Compare performance
# Looking at all 5 of our models, we can see that the state-of-the-art model LightGCN vastly outperforms all other models. When compared to SVD++, a widely used algorithm during the Netflix Prize competition, LightGCN achieves an increase in **Precision@k by 29%, Recall@k by 18%, MAP by 12%, and NDCG by 35%**.
#
# NGCF is the older sister model to LightGCN, but only by a single year. We can see how LightGCN improves in ranking metrics compared to NGCF by simply removing unnecessary operations.
#
# In conclusion, this demonstrates how far recommendation systems have advanced since 2009, and how new model architectures with notable performance increases can be developed in the span of just 1-2 years.
# In[32]:
model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD']
comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG'])
# Convert test user ids to the new ids
users = np.array([user2id[x] for x in test['userId'].unique()])
for rec, name in zip(recs, model_names):
tester = test_df if name == 'SVAE' else test
pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank')
rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank')
    map_at_k = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank')
    ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank')
    comparison.loc[len(comparison)] = [name, pak, rak, map_at_k, ndcg]
# In[33]:
comparison
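# As a rough check of the improvement figures quoted in the comparison text above, the
# relative gain of LightGCN over SVD++ can be read off the comparison table. This small
# block is an added illustration and is not part of the original notebook.
base = comparison.set_index('Algorithm')
for metric in ['Precision@k', 'Recall@k', 'MAP', 'NDCG']:
    gain = (base.loc['LightGCN', metric] - base.loc['SVD++', metric]) / base.loc['SVD++', metric]
    print(f"{metric}: LightGCN improves on SVD++ by {gain:+.1%}")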
# # References:
#
# 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126
# 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaborative Filtering, 2019, https://arxiv.org/abs/1905.08108
# 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb
# 4. <NAME>, Netflix Prize and SVD, 2014, https://www.semanticscholar.org/paper/Netflix-Prize-and-SVD-Gower/ce7b81b46939d7852dbb30538a7796e69fdd407c
#
|
[
"tensorflow.python.framework.ops.disable_eager_execution",
"numpy.random.seed",
"models.GCN.LightGCN",
"pandas.read_csv",
"build_features.AffinityMatrix",
"os.path.join",
"metrics.mean_average_precision",
"numpy.mat",
"pandas.DataFrame",
"sys.path.append",
"numpy.power",
"pandas.merge",
"surprise.Reader",
"os.path.exists",
"metrics.recall_at_k",
"tensorflow.keras.optimizers.Adam",
"models.GCN.NGCF",
"tensorflow.SparseTensor",
"metrics.ndcg",
"scipy.sparse.dok_matrix",
"surprise.SVDpp",
"surprise.SVD",
"scipy.sparse.diags",
"numpy.isinf",
"metrics.precision_at_k",
"utils.numpy_stratified_split",
"numpy.random.permutation",
"models.SVAE.StandardVAE",
"utils.stratified_split",
"sys.path.insert",
"pandas.unique",
"numpy.where"
] |
[((145, 183), 'os.path.join', 'os.path.join', (['"""/content"""', 'project_name'], {}), "('/content', project_name)\n", (157, 183), False, 'import os\n'), ((1194, 1222), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./code"""'], {}), "(0, './code')\n", (1209, 1222), False, 'import sys\n'), ((2517, 2556), 'os.path.join', 'os.path.join', (['"""./data/bronze"""', '"""u.data"""'], {}), "('./data/bronze', 'u.data')\n", (2529, 2556), False, 'import os\n'), ((2568, 2645), 'pandas.read_csv', 'pd.read_csv', (['fp'], {'sep': '"""\t"""', 'names': "['userId', 'movieId', 'rating', 'timestamp']"}), "(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp'])\n", (2579, 2645), True, 'import pandas as pd\n'), ((2758, 2797), 'os.path.join', 'os.path.join', (['"""./data/bronze"""', '"""u.item"""'], {}), "('./data/bronze', 'u.item')\n", (2770, 2797), False, 'import os\n'), ((3035, 3083), 'utils.stratified_split', 'stratified_split', (['raw_data', '"""userId"""', 'train_size'], {}), "(raw_data, 'userId', train_size)\n", (3051, 3083), False, 'from utils import stratified_split, numpy_stratified_split\n'), ((3622, 3674), 'pandas.merge', 'pd.merge', (['train', 'movie_new'], {'on': '"""movieId"""', 'how': '"""left"""'}), "(train, movie_new, on='movieId', how='left')\n", (3630, 3674), True, 'import pandas as pd\n'), ((3850, 3901), 'pandas.merge', 'pd.merge', (['test', 'movie_new'], {'on': '"""movieId"""', 'how': '"""left"""'}), "(test, movie_new, on='movieId', how='left')\n", (3858, 3901), True, 'import pandas as pd\n'), ((4540, 4592), 'scipy.sparse.dok_matrix', 'sp.dok_matrix', (['(n_users, n_movies)'], {'dtype': 'np.float32'}), '((n_users, n_movies), dtype=np.float32)\n', (4553, 4592), True, 'import scipy.sparse as sp\n'), ((4724, 4797), 'scipy.sparse.dok_matrix', 'sp.dok_matrix', (['(n_users + n_movies, n_users + n_movies)'], {'dtype': 'np.float32'}), '((n_users + n_movies, n_users + n_movies), dtype=np.float32)\n', (4737, 4797), True, 'import scipy.sparse as sp\n'), ((5380, 5402), 'scipy.sparse.diags', 'sp.diags', (['D_inv_values'], {}), '(D_inv_values)\n', (5388, 5402), True, 'import scipy.sparse as sp\n'), ((5769, 5814), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'coo.data', 'coo.shape'], {}), '(indices, coo.data, coo.shape)\n', (5784, 5814), True, 'import tensorflow as tf\n'), ((5961, 6025), 'models.GCN.LightGCN', 'LightGCN', (['A_tilde'], {'n_users': 'n_users', 'n_items': 'n_movies', 'n_layers': '(3)'}), '(A_tilde, n_users=n_users, n_items=n_movies, n_layers=3)\n', (5969, 6025), False, 'from models.GCN import LightGCN, NGCF\n'), ((6109, 6153), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (6133, 6153), True, 'import tensorflow as tf\n'), ((6297, 6357), 'models.GCN.NGCF', 'NGCF', (['A_tilde'], {'n_users': 'n_users', 'n_items': 'n_movies', 'n_layers': '(3)'}), '(A_tilde, n_users=n_users, n_items=n_movies, n_layers=3)\n', (6301, 6357), False, 'from models.GCN import LightGCN, NGCF\n'), ((7997, 8016), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (8011, 8016), True, 'import numpy as np\n'), ((8032, 8067), 'numpy.random.permutation', 'np.random.permutation', (['unique_users'], {}), '(unique_users)\n', (8053, 8067), True, 'import numpy as np\n'), ((8521, 8552), 'pandas.unique', 'pd.unique', (["train_set['movieId']"], {}), "(train_set['movieId'])\n", (8530, 8552), True, 'import pandas as pd\n'), ((8839, 8913), 'build_features.AffinityMatrix', 'build_features.AffinityMatrix', ([], {'df': 
'train_set', 'items_list': 'unique_train_items'}), '(df=train_set, items_list=unique_train_items)\n', (8868, 8913), False, 'import build_features\n'), ((8923, 8995), 'build_features.AffinityMatrix', 'build_features.AffinityMatrix', ([], {'df': 'val_set', 'items_list': 'unique_train_items'}), '(df=val_set, items_list=unique_train_items)\n', (8952, 8995), False, 'import build_features\n'), ((9006, 9079), 'build_features.AffinityMatrix', 'build_features.AffinityMatrix', ([], {'df': 'test_set', 'items_list': 'unique_train_items'}), '(df=test_set, items_list=unique_train_items)\n', (9035, 9079), False, 'import build_features\n'), ((9431, 9485), 'utils.numpy_stratified_split', 'numpy_stratified_split', (['val_data'], {'ratio': '(0.75)', 'seed': '(123)'}), '(val_data, ratio=0.75, seed=123)\n', (9453, 9485), False, 'from utils import stratified_split, numpy_stratified_split\n'), ((9515, 9570), 'utils.numpy_stratified_split', 'numpy_stratified_split', (['test_data'], {'ratio': '(0.75)', 'seed': '(123)'}), '(test_data, ratio=0.75, seed=123)\n', (9537, 9570), False, 'from utils import stratified_split, numpy_stratified_split\n'), ((9628, 9664), 'numpy.where', 'np.where', (['(train_data > 3.5)', '(1.0)', '(0.0)'], {}), '(train_data > 3.5, 1.0, 0.0)\n', (9636, 9664), True, 'import numpy as np\n'), ((9676, 9710), 'numpy.where', 'np.where', (['(val_data > 3.5)', '(1.0)', '(0.0)'], {}), '(val_data > 3.5, 1.0, 0.0)\n', (9684, 9710), True, 'import numpy as np\n'), ((9723, 9758), 'numpy.where', 'np.where', (['(test_data > 3.5)', '(1.0)', '(0.0)'], {}), '(test_data > 3.5, 1.0, 0.0)\n', (9731, 9758), True, 'import numpy as np\n'), ((9801, 9838), 'numpy.where', 'np.where', (['(val_data_tr > 3.5)', '(1.0)', '(0.0)'], {}), '(val_data_tr > 3.5, 1.0, 0.0)\n', (9809, 9838), True, 'import numpy as np\n'), ((9894, 9931), 'numpy.where', 'np.where', (['(val_data_te > 3.5)', '(1.0)', '(0.0)'], {}), '(val_data_te > 3.5, 1.0, 0.0)\n', (9902, 9931), True, 'import numpy as np\n'), ((9985, 10023), 'numpy.where', 'np.where', (['(test_data_tr > 3.5)', '(1.0)', '(0.0)'], {}), '(test_data_tr > 3.5, 1.0, 0.0)\n', (9993, 10023), True, 'import numpy as np\n'), ((10202, 10240), 'numpy.where', 'np.where', (['(test_data_te > 3.5)', '(1.0)', '(0.0)'], {}), '(test_data_te > 3.5, 1.0, 0.0)\n', (10210, 10240), True, 'import numpy as np\n'), ((10309, 10343), 'pandas.DataFrame', 'pd.DataFrame', (['test_data_te_ratings'], {}), '(test_data_te_ratings)\n', (10321, 10343), True, 'import pandas as pd\n'), ((10364, 10397), 'pandas.DataFrame', 'pd.DataFrame', (['val_data_te_ratings'], {}), '(val_data_te_ratings)\n', (10376, 10397), True, 'import pandas as pd\n'), ((11259, 11284), 'tensorflow.python.framework.ops.disable_eager_execution', 'disable_eager_execution', ([], {}), '()\n', (11282, 11284), False, 'from tensorflow.python.framework.ops import disable_eager_execution\n'), ((11298, 11547), 'models.SVAE.StandardVAE', 'SVAE.StandardVAE', ([], {'n_users': 'train_data.shape[0]', 'original_dim': 'train_data.shape[1]', 'intermediate_dim': '(200)', 'latent_dim': '(64)', 'n_epochs': '(400)', 'batch_size': '(100)', 'k': '(10)', 'verbose': '(0)', 'seed': '(123)', 'drop_encoder': '(0.5)', 'drop_decoder': '(0.5)', 'annealing': '(False)', 'beta': '(1.0)'}), '(n_users=train_data.shape[0], original_dim=train_data.shape\n [1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100,\n k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5,\n annealing=False, beta=1.0)\n', (11314, 11547), False, 'from models import SVAE\n'), ((13050, 13121), 
'surprise.SVDpp', 'surprise.SVDpp', ([], {'random_state': '(0)', 'n_factors': '(64)', 'n_epochs': '(10)', 'verbose': '(True)'}), '(random_state=0, n_factors=64, n_epochs=10, verbose=True)\n', (13064, 13121), False, 'import surprise\n'), ((13179, 13248), 'surprise.SVD', 'surprise.SVD', ([], {'random_state': '(0)', 'n_factors': '(64)', 'n_epochs': '(10)', 'verbose': '(True)'}), '(random_state=0, n_factors=64, n_epochs=10, verbose=True)\n', (13191, 13248), False, 'import surprise\n'), ((16177, 16254), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']"}), "(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG'])\n", (16189, 16254), True, 'import pandas as pd\n'), ((204, 232), 'os.path.exists', 'os.path.exists', (['project_path'], {}), '(project_path)\n', (218, 232), False, 'import os\n'), ((517, 538), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (532, 538), False, 'import sys\n'), ((5262, 5284), 'numpy.isinf', 'np.isinf', (['D_inv_values'], {}), '(D_inv_values)\n', (5270, 5284), True, 'import numpy as np\n'), ((13600, 13670), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {'columns': "['userId', 'movieId', 'prediction']"}), "(predictions, columns=['userId', 'movieId', 'prediction'])\n", (13612, 13670), True, 'import pandas as pd\n'), ((13897, 13963), 'pandas.merge', 'pd.merge', (['temp', 'predictions'], {'on': "['userId', 'movieId']", 'how': '"""outer"""'}), "(temp, predictions, on=['userId', 'movieId'], how='outer')\n", (13905, 13963), True, 'import pandas as pd\n'), ((16461, 16525), 'metrics.precision_at_k', 'metrics.precision_at_k', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16483, 16525), False, 'import metrics\n'), ((16536, 16597), 'metrics.recall_at_k', 'metrics.recall_at_k', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16555, 16597), False, 'import metrics\n'), ((16608, 16680), 'metrics.mean_average_precision', 'metrics.mean_average_precision', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16638, 16680), False, 'import metrics\n'), ((16692, 16746), 'metrics.ndcg', 'metrics.ndcg', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16704, 16746), False, 'import metrics\n'), ((5206, 5238), 'numpy.power', 'np.power', (['(D_values + 1e-09)', '(-0.5)'], {}), '(D_values + 1e-09, -0.5)\n', (5214, 5238), True, 'import numpy as np\n'), ((5693, 5719), 'numpy.mat', 'np.mat', (['[coo.row, coo.col]'], {}), '([coo.row, coo.col])\n', (5699, 5719), True, 'import numpy as np\n'), ((12992, 13018), 'surprise.Reader', 'surprise.Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (13007, 13018), False, 'import surprise\n')]
|
import atexit
import os
import sys
import time
from contextlib import suppress
from signal import SIGTERM
class Daemon:
def __init__(self, pidfile=None):
self.pidfile = pidfile or os.path.join("/var/run/exhal.service")
def start(self):
try:
self.get_pidfile()
except IOError:
pass
finally:
self.daemonize()
self.run()
def stop(self):
try:
pid = self.get_pidfile()
except IOError:
return
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError as err:
e = str(err.args)
if e.find("No such process") > 0:
self.delete_pidfile()
else:
sys.exit(1)
def daemonize(self):
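        # Standard UNIX double fork: the first fork lets the parent exit and return
        # control to the shell, setsid() detaches from the controlling terminal, and
        # the second fork ensures the daemon can never re-acquire one.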
self.fork()
os.chdir("/")
os.setsid()
os.umask(0)
self.fork()
atexit.register(self.delete_pidfile)
self.create_pidfile()
def fork(self):
try:
if os.fork() > 0:
sys.exit(0)
except OSError as err:
self.error(f"failed to fork a child process. Reason: {err}\n")
def delete_pidfile(self):
with suppress(FileNotFoundError):
os.remove(self.pidfile)
def create_pidfile(self):
with open(self.pidfile, "w+") as fh:
fh.write(str(os.getpid()) + "\n")
def get_pidfile(self):
with open(self.pidfile, "r") as fh:
return int(fh.read().strip())
def error(self, message):
sys.stderr.write(f"{message}\n")
sys.exit(1)
def restart(self):
self.stop()
self.start()
def run(self):
raise NotImplementedError
|
[
"atexit.register",
"os.remove",
"os.getpid",
"contextlib.suppress",
"time.sleep",
"os.umask",
"os.kill",
"os.setsid",
"os.fork",
"sys.stderr.write",
"os.path.join",
"os.chdir",
"sys.exit"
] |
[((873, 886), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (881, 886), False, 'import os\n'), ((895, 906), 'os.setsid', 'os.setsid', ([], {}), '()\n', (904, 906), False, 'import os\n'), ((915, 926), 'os.umask', 'os.umask', (['(0)'], {}), '(0)\n', (923, 926), False, 'import os\n'), ((957, 993), 'atexit.register', 'atexit.register', (['self.delete_pidfile'], {}), '(self.delete_pidfile)\n', (972, 993), False, 'import atexit\n'), ((1606, 1638), 'sys.stderr.write', 'sys.stderr.write', (['f"""{message}\n"""'], {}), "(f'{message}\\n')\n", (1622, 1638), False, 'import sys\n'), ((1647, 1658), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1655, 1658), False, 'import sys\n'), ((194, 232), 'os.path.join', 'os.path.join', (['"""/var/run/exhal.service"""'], {}), "('/var/run/exhal.service')\n", (206, 232), False, 'import os\n'), ((1266, 1293), 'contextlib.suppress', 'suppress', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (1274, 1293), False, 'from contextlib import suppress\n'), ((1307, 1330), 'os.remove', 'os.remove', (['self.pidfile'], {}), '(self.pidfile)\n', (1316, 1330), False, 'import os\n'), ((573, 594), 'os.kill', 'os.kill', (['pid', 'SIGTERM'], {}), '(pid, SIGTERM)\n', (580, 594), False, 'import os\n'), ((611, 626), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (621, 626), False, 'import time\n'), ((1073, 1082), 'os.fork', 'os.fork', ([], {}), '()\n', (1080, 1082), False, 'import os\n'), ((1104, 1115), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1112, 1115), False, 'import sys\n'), ((806, 817), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (814, 817), False, 'import sys\n'), ((1432, 1443), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1441, 1443), False, 'import os\n')]
|
import asyncio
import os
from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \
WriteTransport, Task, BaseTransport
from io import BytesIO
from typing import Any, List, Optional, Mapping
from bitstring import tokenparser, BitStream
def random_byte_array(size: int) -> bytes:
return os.urandom(size)
class StreamClosedException(Exception):
pass
class FIFOStream:
def __init__(self, reader: StreamReader) -> None:
self.reader = reader
self.buffer = BitStream()
self.total_bytes = 0
super().__init__()
async def read(self, fmt):
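        # Parse the single token in `fmt` to learn how many bits it needs, keep reading
        # from the underlying StreamReader until the buffer holds enough bits, then
        # consume the value and discard the used bits from the buffer.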
_, token = tokenparser(fmt)
assert len(token) == 1
name, length, _ = token[0]
assert length is not None
bit_needed = int(length) - (self.buffer.length - self.buffer.pos)
while bit_needed > 0:
new_data = await self.reader.read(4096)
if len(new_data) == 0:
raise StreamClosedException()
self.buffer.append(new_data)
bit_needed = int(length) - (self.buffer.length - self.buffer.pos)
self.total_bytes += length
value = self.buffer.read(fmt)
del self.buffer[:length]
self.buffer.bitpos = 0
return value
class BufferedWriteTransport(WriteTransport):
def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) -> None:
self._buffer = buffer
self._closing = False
self._closed = False
super().__init__(extra)
def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...) -> None:
raise NotImplementedError
def get_write_buffer_size(self) -> int:
raise NotImplementedError
def write(self, data: Any) -> None:
self._buffer.write(data)
def writelines(self, list_of_data: List[Any]) -> None:
raise NotImplementedError
def write_eof(self) -> None:
raise NotImplementedError
def can_write_eof(self) -> bool:
return False
def abort(self) -> None:
raise NotImplementedError
def is_closing(self) -> bool:
return self._closing is True or self._closed is True
def close(self) -> None:
self._closing = True
self._closed = True
class RTMPProtocol(asyncio.Protocol):
def __init__(self, controller, loop: AbstractEventLoop) -> None:
self.loop: AbstractEventLoop = loop
self.transport: BaseTransport = None
self.reader: StreamReader = None
self.writer: StreamWriter = None
self.controller = controller
self.task: Task = None
super().__init__()
def connection_made(self, transport):
self.reader = StreamReader(loop=self.loop)
self.writer = StreamWriter(transport,
self,
self.reader,
self.loop)
self.task = self.loop.create_task(self.controller(self.reader, self.writer))
def connection_lost(self, exc):
self.reader.feed_eof()
def data_received(self, data):
self.reader.feed_data(data)
async def _drain_helper(self):
pass
async def _get_close_waiter(self, stream: StreamWriter):
return self.task
|
[
"bitstring.tokenparser",
"asyncio.StreamReader",
"asyncio.StreamWriter",
"os.urandom",
"bitstring.BitStream"
] |
[((307, 323), 'os.urandom', 'os.urandom', (['size'], {}), '(size)\n', (317, 323), False, 'import os\n'), ((501, 512), 'bitstring.BitStream', 'BitStream', ([], {}), '()\n', (510, 512), False, 'from bitstring import tokenparser, BitStream\n'), ((620, 636), 'bitstring.tokenparser', 'tokenparser', (['fmt'], {}), '(fmt)\n', (631, 636), False, 'from bitstring import tokenparser, BitStream\n'), ((2711, 2739), 'asyncio.StreamReader', 'StreamReader', ([], {'loop': 'self.loop'}), '(loop=self.loop)\n', (2723, 2739), False, 'from asyncio import StreamReader, StreamWriter, AbstractEventLoop, WriteTransport, Task, BaseTransport\n'), ((2762, 2815), 'asyncio.StreamWriter', 'StreamWriter', (['transport', 'self', 'self.reader', 'self.loop'], {}), '(transport, self, self.reader, self.loop)\n', (2774, 2815), False, 'from asyncio import StreamReader, StreamWriter, AbstractEventLoop, WriteTransport, Task, BaseTransport\n')]
|
import os
import json
from app.config import DATA_PATH
from app.utils.mass import string_to_md5
"""
cover:
link:
text:
type:
top:
"""
def get_posters():
with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f:
data = json.load(f)
return data['data']
def save_poster(data):
with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f:
json.dump({'data': data}, f)
def add_poster(post:dict):
data = get_posters()
post['id'] = string_to_md5(post['link'])
if post['top']:
for i in data:
i['top'] = False
    # then append the new poster to the existing data
data.append(post)
save_poster(data)
    return "Added successfully"
def set_as_top(_id):
data = get_posters()
for i in data:
if i['id'] == _id:
i['top'] = True
else:
i['top'] = False
save_poster(data)
def delete_poster(_id):
data = get_posters()
if len(data) == 1:
return data
for i in range(len(data)):
if data[i]['id'] == _id:
del data[i]
break
save_poster(data)
return data
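# Usage sketch (added for illustration, not part of the original module): one poster entry
# uses the fields listed in the module docstring above; the values here are placeholders,
# and the call assumes DATA_PATH/poster.json already exists with a {'data': [...]} payload.
if __name__ == '__main__':
    add_poster({
        'cover': 'https://example.com/cover.png',
        'link': 'https://example.com/post',
        'text': 'Example poster',
        'type': 'article',
        'top': False,
    })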
|
[
"json.dump",
"json.load",
"os.path.join",
"app.utils.mass.string_to_md5"
] |
[((477, 504), 'app.utils.mass.string_to_md5', 'string_to_md5', (["post['link']"], {}), "(post['link'])\n", (490, 504), False, 'from app.utils.mass import string_to_md5\n'), ((236, 248), 'json.load', 'json.load', (['f'], {}), '(f)\n', (245, 248), False, 'import json\n'), ((376, 404), 'json.dump', 'json.dump', (["{'data': data}", 'f'], {}), "({'data': data}, f)\n", (385, 404), False, 'import json\n'), ((170, 208), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""poster.json"""'], {}), "(DATA_PATH, 'poster.json')\n", (182, 208), False, 'import os\n'), ((317, 355), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""poster.json"""'], {}), "(DATA_PATH, 'poster.json')\n", (329, 355), False, 'import os\n')]
|
from __future__ import print_function, unicode_literals
import sys
import os
import traceback
import time
from requests.utils import quote
from requests.exceptions import ConnectionError
import click
from stable_world.py_helpers import platform_uname
from stable_world import __version__ as version
from stable_world import errors
original_excepthook = sys.excepthook
from stable_world.py_helpers import PY3
if PY3:
unicode = str
def write_error_log(cache_dirname, exctype, value, tb):
'''
Write the exception to a the log file
'''
logfile = os.path.join(cache_dirname, 'logs', 'debug.txt')
try:
with open(logfile, 'w') as fd:
uname = platform_uname()
header = '[Unhandled Exception at {}] system={}, stable.world version: {}'
print(header.format(time.ctime(), uname.system, version), file=fd)
tb = '\n'.join(traceback.format_exception(exctype, value, tb))
print(tb, file=fd)
click.echo('\n Wrote full traceback to "{}"\n'.format(logfile), err=True)
except Exception:
click.echo("Failed to write logfile", err=True)
original_excepthook(exctype, value, tb)
def brief_excepthook(cache_dirname):
"""
Shorten exeptions with the base class errors.UserError
"""
def inner(exctype, value, tb):
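        # Known error types get a short, friendly message; anything unexpected falls
        # through to the full traceback written by write_error_log().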
if issubclass(exctype, errors.BRIEF_ERRORS):
click.secho("\n\n {}: ".format(exctype.__name__), nl=False, fg='red', bold=True, err=True)
click.echo(unicode(value), err=True)
click.echo(err=True)
elif issubclass(exctype, ConnectionError):
click.secho("\n\n {}: ".format(exctype.__name__), nl=False, fg='red', bold=True, err=True)
click.echo('Could not connect to url "{}"'.format(value.request.url), err=True)
click.echo(err=True)
else:
msg = "\n\n Critical! Unhandled Exception\n {}: ".format(exctype.__name__)
click.secho(msg, nl=False, fg='red', bold=True, err=True)
click.echo(unicode(value), err=True)
click.echo(err=True)
click.echo('\n Check for updates on this exception on the issue tracker:')
search_str = quote('is:issue {} "{}"'.format(exctype.__name__, value))
click.echo(' ', nl=False)
click.secho(
'https://github.com/srossross/stable.world/issues?q={}\n'.format(search_str),
fg='blue', underline=True, err=True
)
click.echo(' Or create a new issue:', err=True)
click.echo(' ', nl=False, err=True)
click.secho(
'https://github.com/srossross/stable.world/issues/new',
fg='blue', underline=True, err=True
)
write_error_log(cache_dirname, exctype, value, tb)
return
return inner
|
[
"stable_world.py_helpers.platform_uname",
"traceback.format_exception",
"click.echo",
"time.ctime",
"click.secho",
"os.path.join"
] |
[((567, 615), 'os.path.join', 'os.path.join', (['cache_dirname', '"""logs"""', '"""debug.txt"""'], {}), "(cache_dirname, 'logs', 'debug.txt')\n", (579, 615), False, 'import os\n'), ((685, 701), 'stable_world.py_helpers.platform_uname', 'platform_uname', ([], {}), '()\n', (699, 701), False, 'from stable_world.py_helpers import platform_uname\n'), ((1089, 1136), 'click.echo', 'click.echo', (['"""Failed to write logfile"""'], {'err': '(True)'}), "('Failed to write logfile', err=True)\n", (1099, 1136), False, 'import click\n'), ((1554, 1574), 'click.echo', 'click.echo', ([], {'err': '(True)'}), '(err=True)\n', (1564, 1574), False, 'import click\n'), ((895, 941), 'traceback.format_exception', 'traceback.format_exception', (['exctype', 'value', 'tb'], {}), '(exctype, value, tb)\n', (921, 941), False, 'import traceback\n'), ((1836, 1856), 'click.echo', 'click.echo', ([], {'err': '(True)'}), '(err=True)\n', (1846, 1856), False, 'import click\n'), ((1976, 2033), 'click.secho', 'click.secho', (['msg'], {'nl': '(False)', 'fg': '"""red"""', 'bold': '(True)', 'err': '(True)'}), "(msg, nl=False, fg='red', bold=True, err=True)\n", (1987, 2033), False, 'import click\n'), ((2095, 2115), 'click.echo', 'click.echo', ([], {'err': '(True)'}), '(err=True)\n', (2105, 2115), False, 'import click\n'), ((2129, 2214), 'click.echo', 'click.echo', (['"""\n Check for updates on this exception on the issue tracker:"""'], {}), '("""\n Check for updates on this exception on the issue tracker:"""\n )\n', (2139, 2214), False, 'import click\n'), ((2302, 2332), 'click.echo', 'click.echo', (['""" """'], {'nl': '(False)'}), "(' ', nl=False)\n", (2312, 2332), False, 'import click\n'), ((2530, 2580), 'click.echo', 'click.echo', (['""" Or create a new issue:"""'], {'err': '(True)'}), "(' Or create a new issue:', err=True)\n", (2540, 2580), False, 'import click\n'), ((2593, 2633), 'click.echo', 'click.echo', (['""" """'], {'nl': '(False)', 'err': '(True)'}), "(' ', nl=False, err=True)\n", (2603, 2633), False, 'import click\n'), ((2646, 2755), 'click.secho', 'click.secho', (['"""https://github.com/srossross/stable.world/issues/new"""'], {'fg': '"""blue"""', 'underline': '(True)', 'err': '(True)'}), "('https://github.com/srossross/stable.world/issues/new', fg=\n 'blue', underline=True, err=True)\n", (2657, 2755), False, 'import click\n'), ((821, 833), 'time.ctime', 'time.ctime', ([], {}), '()\n', (831, 833), False, 'import time\n')]
|
from prompt_toolkit.application import Application
from prompt_toolkit.formatted_text import merge_formatted_text, to_formatted_text
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, VSplit, Window
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.widgets import Label, TextArea
from lira.app import LiraApp
from lira.book import Book
def get_key_bindings():
keys = KeyBindings()
@keys.add("c-c")
@keys.add("c-q")
def _(event):
"""Pressing Ctrl-Q or Ctrl-C will exit the user interface."""
event.app.exit()
return keys
themes = {
"default": {
"Text": "#fff",
"Strong": "#fff bold",
"Emphasis": "#fff italic",
"Literal": "#fff",
"Paragraph": "#fff",
"CodeBlock": "#fff",
"Prompt": "#fff",
"TestBlock": "#fff",
"Section": "#fff",
"Separator": "#00ff00",
}
}
styles = themes["default"]
sections = {
"menu": TextArea(
height=40, width=25, style=styles["Text"], text="Python-Tutorial\n"
),
"status": TextArea(
height=3,
prompt=">>> ",
style=styles["Text"],
multiline=False,
wrap_lines=False,
),
"text": TextArea(height=10, width=40, style=styles["Text"], text="text"),
"prompt": TextArea(height=10, width=40, style=styles["Text"], text="prompt"),
"vseparator": Window(height=0, width=1, char="|", style=styles["Separator"]),
"hseparator": Window(height=1, char="-", style=styles["Separator"]),
}
class TerminalUI:
def __init__(self, path):
self.theme = "default"
sections_list = []
for section in ["text", "prompt"]:
sections_list.append(sections[section])
book = Book(root=path)
book.parse()
chapters = book.chapters[1]
chapters.parse()
contents = chapters.contents[0]
render = self.get_label(contents)
label = Label(merge_formatted_text(render))
self.container = HSplit(
[
VSplit(
[
sections["menu"],
sections["vseparator"],
HSplit([label, sections["prompt"]]),
]
),
sections["hseparator"],
sections["status"],
]
)
def get_label(self, contents):
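        # Recursively walk the node tree: terminal nodes are rendered as formatted text
        # styled by their tag name, non-terminal nodes are rendered recursively.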
render = []
for node in contents.children:
if node.is_terminal:
text = node.text()
style = node.tagname
render.append(to_formatted_text(text, styles[style]))
else:
render.extend(self.get_label(node))
render.append(to_formatted_text("\n", ""))
return render
def run(self):
lira = LiraApp()
lira.setup()
self.app = Application(
layout=Layout(self.container),
key_bindings=get_key_bindings(),
mouse_support=True,
full_screen=True,
)
self.app.run()
|
[
"prompt_toolkit.layout.layout.Layout",
"prompt_toolkit.widgets.TextArea",
"prompt_toolkit.key_binding.KeyBindings",
"prompt_toolkit.layout.containers.HSplit",
"lira.app.LiraApp",
"prompt_toolkit.layout.containers.Window",
"lira.book.Book",
"prompt_toolkit.formatted_text.to_formatted_text",
"prompt_toolkit.formatted_text.merge_formatted_text"
] |
[((445, 458), 'prompt_toolkit.key_binding.KeyBindings', 'KeyBindings', ([], {}), '()\n', (456, 458), False, 'from prompt_toolkit.key_binding import KeyBindings\n'), ((1014, 1091), 'prompt_toolkit.widgets.TextArea', 'TextArea', ([], {'height': '(40)', 'width': '(25)', 'style': "styles['Text']", 'text': '"""Python-Tutorial\n"""'}), "(height=40, width=25, style=styles['Text'], text='Python-Tutorial\\n')\n", (1022, 1091), False, 'from prompt_toolkit.widgets import Label, TextArea\n'), ((1121, 1215), 'prompt_toolkit.widgets.TextArea', 'TextArea', ([], {'height': '(3)', 'prompt': '""">>> """', 'style': "styles['Text']", 'multiline': '(False)', 'wrap_lines': '(False)'}), "(height=3, prompt='>>> ', style=styles['Text'], multiline=False,\n wrap_lines=False)\n", (1129, 1215), False, 'from prompt_toolkit.widgets import Label, TextArea\n'), ((1272, 1336), 'prompt_toolkit.widgets.TextArea', 'TextArea', ([], {'height': '(10)', 'width': '(40)', 'style': "styles['Text']", 'text': '"""text"""'}), "(height=10, width=40, style=styles['Text'], text='text')\n", (1280, 1336), False, 'from prompt_toolkit.widgets import Label, TextArea\n'), ((1352, 1418), 'prompt_toolkit.widgets.TextArea', 'TextArea', ([], {'height': '(10)', 'width': '(40)', 'style': "styles['Text']", 'text': '"""prompt"""'}), "(height=10, width=40, style=styles['Text'], text='prompt')\n", (1360, 1418), False, 'from prompt_toolkit.widgets import Label, TextArea\n'), ((1438, 1500), 'prompt_toolkit.layout.containers.Window', 'Window', ([], {'height': '(0)', 'width': '(1)', 'char': '"""|"""', 'style': "styles['Separator']"}), "(height=0, width=1, char='|', style=styles['Separator'])\n", (1444, 1500), False, 'from prompt_toolkit.layout.containers import HSplit, VSplit, Window\n'), ((1520, 1573), 'prompt_toolkit.layout.containers.Window', 'Window', ([], {'height': '(1)', 'char': '"""-"""', 'style': "styles['Separator']"}), "(height=1, char='-', style=styles['Separator'])\n", (1526, 1573), False, 'from prompt_toolkit.layout.containers import HSplit, VSplit, Window\n'), ((1796, 1811), 'lira.book.Book', 'Book', ([], {'root': 'path'}), '(root=path)\n', (1800, 1811), False, 'from lira.book import Book\n'), ((2864, 2873), 'lira.app.LiraApp', 'LiraApp', ([], {}), '()\n', (2871, 2873), False, 'from lira.app import LiraApp\n'), ((1999, 2027), 'prompt_toolkit.formatted_text.merge_formatted_text', 'merge_formatted_text', (['render'], {}), '(render)\n', (2019, 2027), False, 'from prompt_toolkit.formatted_text import merge_formatted_text, to_formatted_text\n'), ((2777, 2804), 'prompt_toolkit.formatted_text.to_formatted_text', 'to_formatted_text', (['"""\n"""', '""""""'], {}), "('\\n', '')\n", (2794, 2804), False, 'from prompt_toolkit.formatted_text import merge_formatted_text, to_formatted_text\n'), ((2947, 2969), 'prompt_toolkit.layout.layout.Layout', 'Layout', (['self.container'], {}), '(self.container)\n', (2953, 2969), False, 'from prompt_toolkit.layout.layout import Layout\n'), ((2645, 2683), 'prompt_toolkit.formatted_text.to_formatted_text', 'to_formatted_text', (['text', 'styles[style]'], {}), '(text, styles[style])\n', (2662, 2683), False, 'from prompt_toolkit.formatted_text import merge_formatted_text, to_formatted_text\n'), ((2237, 2272), 'prompt_toolkit.layout.containers.HSplit', 'HSplit', (["[label, sections['prompt']]"], {}), "([label, sections['prompt']])\n", (2243, 2272), False, 'from prompt_toolkit.layout.containers import HSplit, VSplit, Window\n')]
|
import yaml
import numpy as np
import logging
logger = logging.getLogger("cm.conf")
class ControlModelParameters:
"""
Load parameters from .yaml file.
"""
def __init__(self):
self._config = None
self.wind_farm = None
self.turbine = None
self.simulation = None
self.flow = None
self.ssc = None
self.mode = None
def load(self, file):
logger.info("Loading configuration from: {}".format(file))
self._load_configuration_from_yaml(file)
try:
self._assign_configuration()
except KeyError as ke:
message = "Missing definition in config file, did not find {}".format(ke)
logger.error(message, exc_info=1)
raise KeyError("Missing definition in config file, did not find {}".format(ke))
logger.info("Loaded configuration.")
def _load_configuration_from_yaml(self, file):
stream = open(file, "r")
self._config = yaml.load(stream=stream, Loader=yaml.SafeLoader)
def print(self):
print(yaml.dump(self._config))
def _assign_configuration(self):
self.mode = self._config["mode"]
if self.mode == "simulation":
self.wind_farm = self.WindFarm(self._config["wind_farm"])
self.turbine = self.Turbine(self._config["turbine"])
self.simulation = self.Simulation(self._config["simulation"])
self.flow = self.Flow(self._config["flow"])
if self.mode == "supercontroller":
self.ssc = self.SSC(self._config["ssc"])
self.turbine = self.Turbine(self._config["turbine"])
# if self.ssc.type == "gradient_step":
self.wind_farm = self.WindFarm(self._config["wind_farm"])
self.simulation = self.Simulation(self._config["simulation"])
self.flow = self.Flow(self._config["flow"])
# else:
# self.simulation = self.Simulation(self._config["simulation"])
if "estimator" in self._config.keys():
self.estimator = self.Estimator(self._config["estimator"])
class WindFarm:
def __init__(self, config_dict):
self.size = config_dict["size"]
self.cells = config_dict["cells"]
self.positions = config_dict["positions"]
self.yaw_angles = np.deg2rad(config_dict["yaw_angles"])
# self.yaw_angles = [np.array(x) for x in self.yaw_angles]
self.do_refine_turbines = config_dict["do_refine_turbines"]
if self.do_refine_turbines:
self.refine_radius = config_dict["refine_radius"]
else:
self.refine_radius = None
self.controller = self.FarmController(config_dict["farm_controller"])
class FarmController:
def __init__(self, config_dict):
self.control_discretisation = config_dict["control_discretisation"]
self.controls = config_dict["controls"]
self.with_external_controller = False
for control in self.controls.values():
if control['type'] == 'external':
self.with_external_controller = True
self.external_controls = config_dict["external_controller"]["controls"]
self.port = config_dict["external_controller"]["port"]
break
# todo: refine control settings
class Turbine:
"""
Turbine configuration class
"""
        def __init__(self, config_dict):
self.axial_induction = config_dict["axial_induction"]
self.diameter = config_dict["diameter"]
self.radius = self.diameter / 2
self.thickness = config_dict["thickness"]
self.hub_height = config_dict["hub_height"]
self.kernel = config_dict["kernel"]
self.force_scale_axial = config_dict.get("force_scale_axial",1.)
self.force_scale_transverse = config_dict.get("force_scale_transverse",1.)
self.power_scale = config_dict.get("power_scale",1.)
self.yaw_rate_limit = config_dict.get("yaw_rate_limit",-1)
self.coefficients = config_dict.get("coefficients", "induction")
self.pitch = config_dict.get("pitch", 0.)
self.torque = config_dict.get("torque", 0.)
class Simulation:
def __init__(self, config_dict):
self.is_dynamic = config_dict["is_dynamic"]
# if not self.is_dynamic:
# raise NotImplementedError("Steady flow currently not implemented")
if self.is_dynamic:
self.total_time = config_dict["total_time"]
self.time_step = config_dict["time_step"]
self.write_time_step = config_dict["write_time_step"]
self.name = config_dict["name"]
self.save_logs = config_dict["save_logs"]
self.dimensions = config_dict["dimensions"]
self.probes = config_dict.get("probes",[])
class Flow:
def __init__(self, config_dict):
self.kinematic_viscosity = config_dict["kinematic_viscosity"]
self.tuning_viscosity = config_dict["tuning_viscosity"]
self.density = config_dict["density"]
self.mixing_length = config_dict["mixing_length"]
self.wake_mixing_length = config_dict["wake_mixing_length"]
self.wake_mixing_width = config_dict["wake_mixing_width"]
self.wake_mixing_offset = config_dict["wake_mixing_offset"]
self.wake_mixing_ml_max = config_dict["wake_mixing_ml_max"]
self.continuity_correction = config_dict["continuity_correction"]
self.type = config_dict["type"]
if self.type == "steady":
self.inflow_velocity = config_dict["inflow_velocity"]
elif self.type == "series":
self.inflow_velocity_series = np.array(config_dict["inflow_velocity_series"])
self.inflow_velocity = self.inflow_velocity_series[0, 1:3]
self.finite_element = config_dict.get("finite_element","TH")
class SSC:
def __init__(self, config_dict):
self.port = config_dict["port"]
self.controls = config_dict["controls"]
self.external_controls = config_dict["external_controls"]
self.external_measurements = config_dict["external_measurements"]
self.control_discretisation = config_dict["control_discretisation"]
self.prediction_horizon = config_dict["prediction_horizon"]
self.control_horizon = config_dict["control_horizon"]
self.transient_time = config_dict.get("transient_time",-1)
# self.objective = config_dict["objective"]
# if self.objective == "tracking":
# self.power_reference = np.array(config_dict["power_reference"])
# self.power_reference[:, 1] *= 1e6
# # if self.mode == "pitch_torque":
# # raise NotImplementedError("gradient step pitch torque control not implemented.")
self.plant = config_dict.get("plant", "cm")
if self.plant == "sowfa":
self.sowfa_time_step = config_dict["sowfa_time_step"]
class Estimator:
def __init__(self, config_dict):
try:
self.source = config_dict["source"]
except KeyError as ke:
logger.error("Only SOWFA as data source implemented")
self.estimation_type = config_dict["type"]
self.assimilation_window = config_dict["assimilation_window"]
self.forward_step = config_dict.get("forward_step", 1)
self.transient_period = config_dict.get("transient_period", -1)
self.prediction_period = config_dict.get("prediction_period", 0)
self.cost_function_weights = config_dict["cost_function_weights"]
self.data = config_dict["data"]
par = ControlModelParameters()
wind_farm = par.wind_farm
turbine = par.turbine
flow = par.flow
simulation = par.simulation
with_adjoint = True
if __name__ == '__main__':
par = ControlModelParameters()
par.load("../config/test_config.yaml")
# par.print()
# par.turbine.print()
|
[
"yaml.load",
"numpy.deg2rad",
"yaml.dump",
"numpy.array",
"logging.getLogger"
] |
[((56, 84), 'logging.getLogger', 'logging.getLogger', (['"""cm.conf"""'], {}), "('cm.conf')\n", (73, 84), False, 'import logging\n'), ((991, 1039), 'yaml.load', 'yaml.load', ([], {'stream': 'stream', 'Loader': 'yaml.SafeLoader'}), '(stream=stream, Loader=yaml.SafeLoader)\n', (1000, 1039), False, 'import yaml\n'), ((1076, 1099), 'yaml.dump', 'yaml.dump', (['self._config'], {}), '(self._config)\n', (1085, 1099), False, 'import yaml\n'), ((2350, 2387), 'numpy.deg2rad', 'np.deg2rad', (["config_dict['yaw_angles']"], {}), "(config_dict['yaw_angles'])\n", (2360, 2387), True, 'import numpy as np\n'), ((5981, 6028), 'numpy.array', 'np.array', (["config_dict['inflow_velocity_series']"], {}), "(config_dict['inflow_velocity_series'])\n", (5989, 6028), True, 'import numpy as np\n')]
|
# !/usr/bin/python
# -*- coding:UTF-8 -*-
# -----------------------------------------------------------------------#
# File Name: textrank_keyword
# Author: <NAME>
# Mail: <EMAIL>
# Created Time: 2021-09-04
# Description:
# -----------------------------------------------------------------------#
import networkx as nx
import numpy as np
from knlp.common.constant import sentence_delimiters, allow_speech_tags
from knlp.information_extract.keywords_extraction.textrank import TextRank
from knlp.utils.util import get_default_stop_words_file, AttrDict
class TextRank4Keyword(TextRank):
"""
    This class implements keyword extraction based on the TextRank algorithm.
    The basic idea: first segment the text into words, then compute a weight for each
    word, and finally sort the words by weight to obtain their importance.
    English input is not considered for now.
    Introduction (in Chinese): https://www.jiqizhixin.com/articles/2018-12-28-18
    ref https://github.com/letiantian/TextRank4ZH/blob/master/textrank4zh/
"""
def __init__(self, stop_words_file=get_default_stop_words_file(), private_vocab=None,
allow_speech_tags=allow_speech_tags,
delimiters="|".join(sentence_delimiters)):
"""
Args:
            stop_words_file: path to the stop-words file
            private_vocab: optional private vocabulary (label set)
            allow_speech_tags: POS tags to keep
            delimiters: defaults to `?!;?!。;…\n`, used to split the text into sentences.
"""
super().__init__(stop_words_file=stop_words_file, private_vocab=private_vocab,
allow_speech_tags=allow_speech_tags,
delimiters=delimiters)
def get_keywords(self, num=6, window=2, word_min_len=1, page_rank_config={'alpha': 0.85, }):
"""
        Return the num most important keywords whose length is at least word_min_len.
        Args:
            num: number of keywords to return
            window: co-occurrence window size used to build edges
            word_min_len: minimum length of a keyword
            page_rank_config: settings passed to the PageRank computation
        Returns: list of keywords as AttrDict {word, weight} items
"""
        # Get the keywords scored by TextRank, sorted by importance
keywords = self.sort_words(self._vertex_source, self._edge_source, window=window,
page_rank_config=page_rank_config)
result = []
count = 0
for item in keywords:
if count >= num:
break
if len(item.word) >= word_min_len:
result.append(item)
count += 1
return result
def get_keyphrases(self, keywords_num=12, min_occur_num=2):
"""
        Get key phrases.
        Build candidate phrases from the top keywords_num keywords; a phrase is kept only
        if it occurs at least min_occur_num times in the original text.
        Only a limited number (keywords_num) of keywords is used to build phrases.
        Args:
            keywords_num: number of keywords to use
            min_occur_num: minimum number of occurrences
        Returns: list of key phrases.
"""
keywords_set = set([item.word for item in self.get_keywords(num=keywords_num, word_min_len=1)])
keyphrases = set()
for sentence in self.words_no_filter:
one = []
for word in sentence:
if word in keywords_set:
one.append(word)
else:
if len(one) > 1:
                        keyphrases.add(''.join(one))  # concatenate the consecutive keywords into one phrase
if len(one) == 0:
continue
else:
one = []
        # fallback: keep the last phrase if the sentence ended inside one
if len(one) > 1:
keyphrases.add(''.join(one))
return [phrase for phrase in keyphrases
if self.text.count(phrase) >= min_occur_num or phrase in self.label_set]
@staticmethod
def sort_words(vertex_source, edge_source, window=2, page_rank_config=None):
"""
        Sort the words by importance in descending order.
        Args:
            vertex_source: 2-D list; each sub-list is a sentence whose elements are words used as PageRank nodes
            edge_source: 2-D list; each sub-list is a sentence whose elements are words used to build PageRank edges from word positions
            window: words within a window of this size inside one sentence are pairwise connected by an edge
            page_rank_config: PageRank settings
        Returns:
"""
page_rank_config = {'alpha': 0.85, } if not page_rank_config else page_rank_config
sorted_words = []
word_index = {}
index_word = {}
_vertex_source = vertex_source
_edge_source = edge_source
words_number = 0
for word_list in _vertex_source:
for word in word_list:
if word not in word_index:
word_index[word] = words_number
index_word[words_number] = word
# MAP WORD TO AN INDEX
words_number += 1
graph = np.zeros((words_number, words_number)) # words_number X words_number MATRIX
def combine(word_list, window=2):
"""
            Build pairs of words that co-occur within the window, used to create edges between words.
            Args:
                word_list: list of str, the words of one sentence.
                window: int, window size.
            Returns: yields (word, word) tuples.
"""
if window < 2:
window = 2
for x in range(1, window):
if x >= len(word_list):
break
word_list2 = word_list[x:]
res = zip(word_list, word_list2)
for r in res:
yield r
for word_list in _edge_source:
for w1, w2 in combine(word_list, window):
if w1 in word_index and w2 in word_index:
index1 = word_index[w1]
index2 = word_index[w2]
                    # linked positions get edge weight 1.0
graph[index1][index2] = 1.0
graph[index2][index1] = 1.0
nx_graph = nx.from_numpy_matrix(graph)
        scores = nx.pagerank(nx_graph, **page_rank_config)  # dict mapping node index -> PageRank score for the whole graph
sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
for index, score in sorted_scores:
item = AttrDict(word=index_word[index], weight=score)
sorted_words.append(item)
return sorted_words
if __name__ == '__main__':
text = "测试分词的结果是否符合预期"
window = 5
num = 20
word_min_len = 2
need_key_phrase = True
tr4w = TextRank4Keyword()
tr4w.analyze(text=text, lower=True)
output = {"key_words": [], "key_phrase": []}
res_keywords = tr4w.get_keywords(num=num, word_min_len=word_min_len, window=window)
for item in res_keywords:
kw_count = tr4w.text.count(item.word)
output["key_words"].append([item.word, item.weight, kw_count])
if need_key_phrase:
for phrase in tr4w.get_keyphrases(keywords_num=10, min_occur_num=2):
output['key_phrase'].append(phrase)
print(output)
|
[
"knlp.utils.util.get_default_stop_words_file",
"networkx.from_numpy_matrix",
"networkx.pagerank",
"knlp.utils.util.AttrDict",
"numpy.zeros"
] |
[((874, 903), 'knlp.utils.util.get_default_stop_words_file', 'get_default_stop_words_file', ([], {}), '()\n', (901, 903), False, 'from knlp.utils.util import get_default_stop_words_file, AttrDict\n'), ((4301, 4339), 'numpy.zeros', 'np.zeros', (['(words_number, words_number)'], {}), '((words_number, words_number))\n', (4309, 4339), True, 'import numpy as np\n'), ((5321, 5348), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['graph'], {}), '(graph)\n', (5341, 5348), True, 'import networkx as nx\n'), ((5366, 5407), 'networkx.pagerank', 'nx.pagerank', (['nx_graph'], {}), '(nx_graph, **page_rank_config)\n', (5377, 5407), True, 'import networkx as nx\n'), ((5618, 5664), 'knlp.utils.util.AttrDict', 'AttrDict', ([], {'word': 'index_word[index]', 'weight': 'score'}), '(word=index_word[index], weight=score)\n', (5626, 5664), False, 'from knlp.utils.util import get_default_stop_words_file, AttrDict\n')]
|
import os,sys
from PIL import Image
import numpy
LETTER_NB = 5
LETTER_SPACE = 1
LETTER_SIZE = 8
LETTER_LEFT = 10
LETTER_RIGHT = 16
class CaptchaReader(object):
    """Read a captcha by matching each extracted glyph against a folder of reference glyph images."""
def __init__(self, folderDico):
super(CaptchaReader, self).__init__()
self.folderDico = folderDico + "/"
def read(self, filename):
# Extract symbol from targetted captcha
symb_extractor = captchaSymbolExtractor()
listSymb = symb_extractor.extractSymbol(filename)
cap_string = ""
nb_unread = 0
for symb in listSymb:
succes = False
for f in os.listdir(self.folderDico):
if f.endswith(".png"):
pil_image = Image.open(self.folderDico + f)
dic_symb = numpy.array(pil_image)
if self.compare(symb, dic_symb):
succes = True
if f[0].isdigit():
cap_string += f[0]
else:
cap_string += f[3]
break
if not succes:
# If you go there, then the symbol has not been read
Image.fromarray(symb).save("error/symb" + str(nb_unread) + ".png")
nb_unread += 1
#return the string
return cap_string
def compare(self, symb_np, im_dic):
#print symb_np
return numpy.array_equal(symb_np, im_dic/255)
class captchaSymbolExtractor(object):
    """Extract the individual letter glyphs from a captcha image."""
def __init__(self):
super(captchaSymbolExtractor, self).__init__()
def extractSymbol(self, filename):
# mat_pix is a numpy array
mat_pix = self.openImage(filename)
list_im = []
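        # Each of the 5 glyphs spans LETTER_SIZE columns (plus LETTER_SPACE of padding)
        # starting at LETTER_LEFT; rows 6:19 contain the glyph pixels.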
for i in range(5):
left = LETTER_LEFT + i * (LETTER_SIZE + LETTER_SPACE)
right = LETTER_LEFT + (i + 1) * (LETTER_SIZE + LETTER_SPACE) - 1
symb = mat_pix[6:19, left:right]
list_im.append(symb)
im = Image.fromarray(symb*255)
im = im.convert('1')
return list_im
def openImage(self, filename):
pil_image = Image.open(filename)
return numpy.array(pil_image)
|
[
"PIL.Image.open",
"PIL.Image.fromarray",
"numpy.array",
"numpy.array_equal",
"os.listdir"
] |
[((1149, 1189), 'numpy.array_equal', 'numpy.array_equal', (['symb_np', '(im_dic / 255)'], {}), '(symb_np, im_dic / 255)\n', (1166, 1189), False, 'import numpy\n'), ((1794, 1814), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1804, 1814), False, 'from PIL import Image\n'), ((1824, 1846), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (1835, 1846), False, 'import numpy\n'), ((566, 593), 'os.listdir', 'os.listdir', (['self.folderDico'], {}), '(self.folderDico)\n', (576, 593), False, 'import os, sys\n'), ((1678, 1705), 'PIL.Image.fromarray', 'Image.fromarray', (['(symb * 255)'], {}), '(symb * 255)\n', (1693, 1705), False, 'from PIL import Image\n'), ((639, 670), 'PIL.Image.open', 'Image.open', (['(self.folderDico + f)'], {}), '(self.folderDico + f)\n', (649, 670), False, 'from PIL import Image\n'), ((687, 709), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (698, 709), False, 'import numpy\n'), ((953, 974), 'PIL.Image.fromarray', 'Image.fromarray', (['symb'], {}), '(symb)\n', (968, 974), False, 'from PIL import Image\n')]
|
from pipeline.io import unique_id, video_generator
def test_video_generator_2015(bees_video, filelists_path):
gen = video_generator(bees_video, ts_format="2015", path_filelists=filelists_path)
results = list(gen)
assert len(results) == 3
prev_ts = 0.0
for _, _, ts in results:
assert ts > prev_ts
prev_ts = ts
def test_video_generator_2016(bees_video_2016):
gen = video_generator(bees_video_2016, ts_format="2016", path_filelists=None)
results = list(gen)
assert len(results) == 4
prev_ts = 0.0
for _, _, ts in results:
assert ts > prev_ts
prev_ts = ts
def test_unique_id():
first_id = unique_id()
second_id = unique_id()
assert first_id.bit_length() == 64
assert second_id.bit_length() == 64
assert first_id != second_id
|
[
"pipeline.io.video_generator",
"pipeline.io.unique_id"
] |
[((122, 198), 'pipeline.io.video_generator', 'video_generator', (['bees_video'], {'ts_format': '"""2015"""', 'path_filelists': 'filelists_path'}), "(bees_video, ts_format='2015', path_filelists=filelists_path)\n", (137, 198), False, 'from pipeline.io import unique_id, video_generator\n'), ((408, 479), 'pipeline.io.video_generator', 'video_generator', (['bees_video_2016'], {'ts_format': '"""2016"""', 'path_filelists': 'None'}), "(bees_video_2016, ts_format='2016', path_filelists=None)\n", (423, 479), False, 'from pipeline.io import unique_id, video_generator\n'), ((668, 679), 'pipeline.io.unique_id', 'unique_id', ([], {}), '()\n', (677, 679), False, 'from pipeline.io import unique_id, video_generator\n'), ((696, 707), 'pipeline.io.unique_id', 'unique_id', ([], {}), '()\n', (705, 707), False, 'from pipeline.io import unique_id, video_generator\n')]
|
import argparse
from .env_viewer import viewer
from .envs.bm_config import BM_Config
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Assistive Gym Environment Viewer')
parser.add_argument('--env', default='ScratchItchJaco-v1',
help='Environment to test (default: ScratchItchJaco-v1)')
bm_config = BM_Config()
parser = bm_config.add_bm_args(parser)
args = parser.parse_args()
bm_config.change_bm_config(args)
viewer(args.env)
|
[
"argparse.ArgumentParser"
] |
[((126, 197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Assistive Gym Environment Viewer"""'}), "(description='Assistive Gym Environment Viewer')\n", (149, 197), False, 'import argparse\n')]
|
import tweepy
import json
import pymongo
#from src_python import utilities
# Initialize the API consumer keys and access tokens
consumer_key = "LukFsjKDofVcCdiKsCnxiLx2V"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
# Authenticate tweepy using the keys
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
def get_user(user_id):
print("Searching full information for user with id " + str(user_id))
try:
user_json = api.get_user(user_id)
except tweepy.TweepError as tweep_error:
print("Error with code : " + str(tweep_error.response.text))
return 0
return user_json
def get_user_tweets(user_id):
    timeline = []
    progress = 0
    for status in tweepy.Cursor(api.user_timeline, id=user_id).items():
        timeline.append(status)
        progress += 1
        print("Fetched " + str(progress) + " out of all timeline items")
    return timeline
def get_user_network(user_id):
print("Searching network for user with id " + str(user_id))
followers = []
friends = []
max_followers = 100000
max_friends = 100000
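    # Page through follower and friend ids with cursors, stopping early once the
    # caps above are reached to limit API calls.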
try:
for page in tweepy.Cursor(api.followers_ids, id=user_id).pages():
followers.extend(page)
if len(followers) >= max_followers:
break
print("Followers so far : " + str(len(followers)))
print("finished followers")
for page in tweepy.Cursor(api.friends_ids, id=user_id).pages():
friends.extend(page)
if len(friends) >= max_friends:
break
print("Friends so far : " + str(len(friends)))
print("finished friends")
except tweepy.TweepError as tweep_error:
print("Error with code : " + str(tweep_error.response.text))
return 0
print("User with ID: " + user_id + " has " + str(len(followers)) + " followers and " + str(len(friends)) + " friends")
custom_object = {
"id": user_id,
"followers": followers,
"friends": friends
}
return custom_object
if __name__ == '__main__':
# Aristotle University's Twitter user ID
user_id = "234343780"
# <NAME>'s user_id
#user_id = "50374439"
### Get the entire timeline of tweets and retweets of a user ###
statuses = get_user_tweets(user_id)
for status in statuses:
print (status._json["text"])
#### Get full information about the user ###
# user_json = get_user(user_id)
# Access all the information using .*field*
# https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/user-object
# screen_name = str(user_json.screen_name)
# followers_count = str(user_json.followers_count)
# friends_count = str(user_json.friends_count)
#
# print ("This user has the screen name: "+screen_name)
# print ("This user has "+followers_count+" followers")
# print ("This user has "+friends_count+" friends")
#### Get the network (friends, followers) of the user ###
# network = get_user_network(user_id)
# print(network["friends"])
# print(network["followers"])
|
[
"tweepy.OAuthHandler",
"tweepy.Cursor",
"tweepy.API"
] |
[((297, 347), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (316, 347), False, 'import tweepy\n'), ((412, 507), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)', 'compression': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True,\n compression=True)\n', (422, 507), False, 'import tweepy\n'), ((908, 952), 'tweepy.Cursor', 'tweepy.Cursor', (['api.user_timeline'], {'id': 'user_id'}), '(api.user_timeline, id=user_id)\n', (921, 952), False, 'import tweepy\n'), ((1319, 1363), 'tweepy.Cursor', 'tweepy.Cursor', (['api.followers_ids'], {'id': 'user_id'}), '(api.followers_ids, id=user_id)\n', (1332, 1363), False, 'import tweepy\n'), ((1597, 1639), 'tweepy.Cursor', 'tweepy.Cursor', (['api.friends_ids'], {'id': 'user_id'}), '(api.friends_ids, id=user_id)\n', (1610, 1639), False, 'import tweepy\n')]
|
import xlrd
# from xlutils.copy import copy as xlscopy
import shutil
import os
from numpy import sqrt, abs
import sys
sys.path.append('../..') # delete this line if it will eventually be called from main.py
from GeneralMethod.PyCalcLib import Fitting
from GeneralMethod.PyCalcLib import Method
from reportwriter.ReportWriter import ReportWriter
class thermology:
report_data_keys = [
'1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20',
'21','22','23','24','25','26','27','28','29',
'101','102','103','104','105','106','107','108','109','110','111','112','113','114','115','116','117',
'118','119','120','121','122','123','124','125','126','127','128','129',
'L','K','J',
'Ua','UJ'
]
PREVIEW_FILENAME = "Preview.pdf"
DATA_SHEET_FILENAME = "data.xlsx"
REPORT_TEMPLATE_FILENAME = "thermology_empty.docx"
REPORT_OUTPUT_FILENAME = "thermology_out.docx"
def __init__(self):
        self.data = {} # stores the physical quantities measured in the experiment
        self.uncertainty = {} # stores the uncertainties of the physical quantities
        self.report_data = {} # stores the values to be filled into the lab report
        print("1021 Measuring the latent heat of fusion + the mechanical equivalent of heat\n1. Pre-lab review\n2. Data processing")
while True:
try:
                oper = input("Please select: ").strip()
except EOFError:
sys.exit(0)
if oper != '1' and oper != '2':
print("输入内容非法!请输入一个数字1或2")
else:
break
if oper == '1':
print("现在开始实验预习")
print("正在打开预习报告......")
os.startfile(self.PREVIEW_FILENAME)
elif oper == '2':
print("现在开始数据处理")
print("即将打开数据输入文件......")
# 打开数据输入文件
os.startfile(self.DATA_SHEET_FILENAME)
input("输入数据完成后请保存并关闭excel文件,然后按回车键继续")
# 从excel中读取数据
self.input_data("./"+self.DATA_SHEET_FILENAME) # './' is necessary when running this file, but should be removed if run main.py
print("数据读入完毕,处理中......")
# 计算物理量
self.calc_data1()
self.calc_data2()
# 计算不确定度
self.calc_uncertainty()
print("正在生成实验报告......")
# 生成实验报告
self.fill_report()
print("实验报告生成完毕,正在打开......")
os.startfile(self.REPORT_OUTPUT_FILENAME)
print("Done!")
    '''
    Read the data from the excel sheet.
    @param filename: name of the input excel file
    @return none
    '''
def input_data(self, filename):
ws = xlrd.open_workbook(filename).sheet_by_name('thermology1')
        # read the data from excel
list_time = []
list_resistance = []
list_temperature = []
list_weight = []
for row in [1, 4, 7]:
for col in range(1, 8):
                list_time.append(float(ws.cell_value(row, col))) # time
self.data['list_time'] = list_time
for row in [2, 5, 8]:
for col in range(1, 8):
                list_resistance.append(float(ws.cell_value(row, col))) # resistance
self.data['list_resistance'] = list_resistance
for row in [3, 6, 9]:
for col in range(1, 8):
                list_temperature.append(float(ws.cell_value(row, col))) # temperature
self.data['list_temperature'] = list_temperature
col = 1
for row in range(10, 14):
            list_weight.append(float(ws.cell_value(row,col))) # the various masses
self.data['list_weight'] = list_weight
row = 14
        temp_ice = float(ws.cell_value(row, col)) # ice temperature
self.data['temp_ice'] = temp_ice
row = 15
        temp_env = float(ws.cell_value(row, col)) # ambient temperature
self.data['temp_env'] = temp_env
ws = xlrd.open_workbook(filename).sheet_by_name('thermology2')
list_time2 = []
list_resistance2 = []
list_temperature2 = []
for row in [1, 4, 7, 10]:
for col in range(1, 9):
if isinstance(ws.cell_value(row, col), str):
continue
else:
list_time2.append(float(ws.cell_value(row, col)))
self.data['list_time2'] = list_time2
for row in [2, 5, 8, 11]:
for col in range(1, 9):
if isinstance(ws.cell_value(row, col), str):
continue
else:
list_resistance2.append(float(ws.cell_value(row, col)))
self.data['list_resistance2'] = list_resistance2
for row in [3, 6, 9, 12]:
for col in range(1, 9):
if isinstance(ws.cell_value(row, col), str):
continue
else:
list_temperature2.append(float(ws.cell_value(row, col)))
self.data['list_temperature2'] = list_temperature2
col = 1
row = 13
temp_env2 = float(ws.cell_value(row, col))
self.data['temp_env2'] = temp_env2
row = 14
voltage_begin = float(ws.cell_value(row, col))
self.data['voltage_begin'] = voltage_begin
row = 15
voltage_end = float(ws.cell_value(row, col))
self.data['voltage_end'] = voltage_end
row = 16
resitence = float(ws.cell_value(row, col))
self.data['resitence'] = resitence
self.data['c1'] = 0.389e3
self.data['c2'] = 0.389e3
self.data['c0'] = 4.18e3
self.data['ci'] = 1.80e3
def calc_data1(self):
list_weight = self.data['list_weight']
list_time1 = self.data['list_time']
list_temperature1 = self.data['list_temperature']
temp_ice = self.data['temp_ice']
temp_env = self.data['temp_env']
c1 = self.data['c1']
c2 = self.data['c2']
c0 = self.data['c0']
ci = self.data['ci']
m_water = list_weight[1] - list_weight[0]
m_ice = list_weight[2] - list_weight[1]
list_graph = Fitting.linear(list_time1, list_temperature1, show_plot=False)
self.data['list_graph'] = list_graph
temp_begin = list_graph[0] * list_time1[0] + list_graph[1]
temp_end = list_graph[0] * list_time1[(len(list_time1)-1)] + list_graph[1]
self.data['temp_begin'] = temp_begin
self.data['temp_end'] = temp_end
self.data['m_water'] = m_water
self.data['m_ice'] = m_ice
'''
print(temp_begin)
print('\n',temp_end)
print('\n',m_water)
print('\n',m_ice)
print('!1!\n',c0*m_water*0.001+c1*list_weight[3]*0.001+c2*(list_weight[0]-list_weight[3])*0.001)
print('\n!2!\n',temp_begin-temp_end)
print('\n!3!\n',c0*temp_end)
print('\n!4!\n',ci*temp_ice)
'''
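        # The expressions below appear to implement the calorimetric energy balance:
        # L would be the latent heat of fusion of the ice and K a Newton's-law-of-cooling
        # correction constant (reading inferred from the variable names and constants above).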
L = 1/(m_ice*0.001) * (c0*m_water*0.001+c1*list_weight[3]*0.001+c2*(list_weight[0]-list_weight[3])*0.001) * (temp_begin-temp_end)- c0*temp_end + ci*temp_ice
K = c0 * m_water*0.001 * (list_temperature1[15]-list_temperature1[8]) / ((list_time1[15]-list_time1[8])*(list_temperature1[15]-temp_env))
self.data['L'] = L
self.data['K'] = K
def calc_data2(self):
c1 = self.data['c1']
c0 = self.data['c0']
list_temperature2 = self.data['list_temperature2']
list_weight = self.data['list_weight']
temp_env2 = self.data['temp_env2']
list_time2 = self.data['list_time2']
voltage_begin = self.data['voltage_begin']
voltage_end = self.data['voltage_end']
resitence = self.data['resitence']
m_water = list_weight[1] - list_weight[0]
list_x = []
list_y = []
for i in range(len(list_temperature2)):
if i==len(list_temperature2)-1:
break
list_x.append((list_temperature2[i]+list_temperature2[i+1])/2-temp_env2)
for i in range(len(list_temperature2)):
if i == len(list_temperature2)-1:
break
list_y.append((list_temperature2[i+1]-list_temperature2[i])/((list_time2[i+1]-list_time2[i])*60))
self.data['list_x'] = list_x
self.data['list_y'] = list_y
list_graph2 = Fitting.linear(list_x, list_y, show_plot=False)
self.data['list_graph2'] = list_graph2
J = ((voltage_begin+voltage_end)/2)**2/(list_graph2[1]*resitence*(c0*m_water*0.001+c1*list_weight[3]*0.001+64.38))
self.data['J'] = J
'''
print('b',list_graph2[0])
print('\n a',list_graph2[1])
print('\n r',list_graph2[2])
'''
def calc_uncertainty(self):
list_a = []
list_x = self.data['list_x']
list_y = self.data['list_y']
list_graph2 = self.data['list_graph2']
voltage_begin = self.data['voltage_begin']
voltage_end = self.data['voltage_end']
resitence = self.data['resitence']
c1 = self.data['c1']
c0 = self.data['c0']
list_weight = self.data['list_weight']
m_water = list_weight[1] - list_weight[0]
for i in range(len(list_x)):
list_a.append(list_y[i]-list_graph2[1]*list_x[i])
self.data['list_a'] = list_a
Ua = Method.a_uncertainty(self.data['list_a'])
self.data['Ua'] = Ua
UJ = abs(((voltage_begin+voltage_end)/2)**2/(Ua*resitence*(c0*m_water*0.001+c1*list_weight[3]*0.001 + 64.38)))
self.data['UJ'] = UJ
def fill_report(self):
# 表格:xy
for i, x_i in enumerate(self.data['list_x']):
self.report_data[str(i + 1)] = "%.5f" % (x_i)
for i, y_i in enumerate(self.data['list_y']):
self.report_data[str(i + 101)] = "%.5f" % (y_i)
# 最终结果
self.report_data['L'] = self.data['L']
self.report_data['K'] = self.data['K']
self.report_data['J'] = self.data['J']
self.report_data['Ua'] = self.data['Ua']
self.report_data['UJ'] = self.data['UJ']
RW = ReportWriter()
RW.load_replace_kw(self.report_data)
RW.fill_report(self.REPORT_TEMPLATE_FILENAME, self.REPORT_OUTPUT_FILENAME)
if __name__ == '__main__':
mc = thermology()
|
[
"sys.path.append",
"numpy.abs",
"GeneralMethod.PyCalcLib.Fitting.linear",
"reportwriter.ReportWriter.ReportWriter",
"xlrd.open_workbook",
"GeneralMethod.PyCalcLib.Method.a_uncertainty",
"sys.exit",
"os.startfile"
] |
[((126, 150), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (141, 150), False, 'import sys\n'), ((6025, 6087), 'GeneralMethod.PyCalcLib.Fitting.linear', 'Fitting.linear', (['list_time1', 'list_temperature1'], {'show_plot': '(False)'}), '(list_time1, list_temperature1, show_plot=False)\n', (6039, 6087), False, 'from GeneralMethod.PyCalcLib import Fitting\n'), ((8321, 8368), 'GeneralMethod.PyCalcLib.Fitting.linear', 'Fitting.linear', (['list_x', 'list_y'], {'show_plot': '(False)'}), '(list_x, list_y, show_plot=False)\n', (8335, 8368), False, 'from GeneralMethod.PyCalcLib import Fitting\n'), ((9355, 9396), 'GeneralMethod.PyCalcLib.Method.a_uncertainty', 'Method.a_uncertainty', (["self.data['list_a']"], {}), "(self.data['list_a'])\n", (9375, 9396), False, 'from GeneralMethod.PyCalcLib import Method\n'), ((9441, 9572), 'numpy.abs', 'abs', (['(((voltage_begin + voltage_end) / 2) ** 2 / (Ua * resitence * (c0 * m_water *\n 0.001 + c1 * list_weight[3] * 0.001 + 64.38)))'], {}), '(((voltage_begin + voltage_end) / 2) ** 2 / (Ua * resitence * (c0 *\n m_water * 0.001 + c1 * list_weight[3] * 0.001 + 64.38)))\n', (9444, 9572), False, 'from numpy import sqrt, abs\n'), ((10136, 10150), 'reportwriter.ReportWriter.ReportWriter', 'ReportWriter', ([], {}), '()\n', (10148, 10150), False, 'from reportwriter.ReportWriter import ReportWriter\n'), ((1550, 1585), 'os.startfile', 'os.startfile', (['self.PREVIEW_FILENAME'], {}), '(self.PREVIEW_FILENAME)\n', (1562, 1585), False, 'import os\n'), ((1720, 1758), 'os.startfile', 'os.startfile', (['self.DATA_SHEET_FILENAME'], {}), '(self.DATA_SHEET_FILENAME)\n', (1732, 1758), False, 'import os\n'), ((2306, 2347), 'os.startfile', 'os.startfile', (['self.REPORT_OUTPUT_FILENAME'], {}), '(self.REPORT_OUTPUT_FILENAME)\n', (2318, 2347), False, 'import os\n'), ((2520, 2548), 'xlrd.open_workbook', 'xlrd.open_workbook', (['filename'], {}), '(filename)\n', (2538, 2548), False, 'import xlrd\n'), ((3750, 3778), 'xlrd.open_workbook', 'xlrd.open_workbook', (['filename'], {}), '(filename)\n', (3768, 3778), False, 'import xlrd\n'), ((1301, 1312), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1309, 1312), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
from django_filters import (FilterSet, CharFilter, DateTimeFilter,
NumberFilter, BooleanFilter)
from guardian.shortcuts import get_objects_for_user
from rest_framework import filters
from chat.models import Post, Channel
def get_readable_channel_ids(user):
"""
Return a list of channel ids on which user given in parameter has at least
read_channel permission.
It also includes public channels, where anyone can read/write on.
Channel ids are unique.
"""
readable_channels = get_objects_for_user(user, 'chat.read_channel',
use_groups=True)
readable_ids = [c.id for c in readable_channels]
public_channels = Channel.objects.filter(public=True)
for public_channel in public_channels:
readable_ids.append(public_channel.id)
unique_readable_ids = set(readable_ids)
return unique_readable_ids
class ReadableChannelFilter(filters.BaseFilterBackend):
"""
All users cannot see what they want. They are restricted to see only
channels on which they have at least read_channel permission.
"""
def filter_queryset(self, request, queryset, view):
readable_channel_ids = get_readable_channel_ids(request.user)
return queryset.filter(id__in=readable_channel_ids)
class ChannelFilter(FilterSet):
name = CharFilter(name='name', lookup_type='icontains',
label='name contain filter')
public = BooleanFilter(name='public', label='is public ?')
ca_label = 'filter channels created after or on provided date / time'
created_after = DateTimeFilter(name='date', lookup_type='gte',
label=ca_label)
cb_label = 'filter channels created before or on provided date / time'
created_before = DateTimeFilter(name='date', lookup_type='lte',
label=ca_label)
class Meta:
model = Channel
fields = ('name', 'public', 'created_after', 'created_before',)
class ReadablePostFilter(filters.BaseFilterBackend):
"""
Since channels have permissions, posts posted in a channel are not visible
for anyone. This filter makes sure only posts a user can read will be
returned.
"""
def filter_queryset(self, request, queryset, view):
readable_channel_ids = get_readable_channel_ids(request.user)
return queryset.filter(channel__in=readable_channel_ids)
class PostFilter(FilterSet):
author = CharFilter(name='author', lookup_type='icontains',
label='author contain filter')
type = CharFilter(name='type', label='filter on letter value')
content = CharFilter(name='type', lookup_type='icontains',
label='content contain filter')
channel = NumberFilter(name='channel',
label='filters posts sent on provided channel')
afterid = NumberFilter(name='id', lookup_type='gt',
label='filter posts posted after given post id')
dflabel = 'filter posts posted after or on provided date / time'
datefrom = DateTimeFilter(name='date', lookup_type='gte', label=dflabel)
dtlabel = 'filter posts posted before or on provided date / time'
dateto = DateTimeFilter(name='date', lookup_type='lte', label=dtlabel)
content = CharFilter(name='content', lookup_type='icontains',
label='content contain filter')
class Meta:
model = Post
fields = ('author', 'type', 'content', 'datefrom', 'dateto',)
|
[
"django_filters.CharFilter",
"chat.models.Channel.objects.filter",
"django_filters.NumberFilter",
"django_filters.DateTimeFilter",
"guardian.shortcuts.get_objects_for_user",
"django_filters.BooleanFilter"
] |
[((561, 625), 'guardian.shortcuts.get_objects_for_user', 'get_objects_for_user', (['user', '"""chat.read_channel"""'], {'use_groups': '(True)'}), "(user, 'chat.read_channel', use_groups=True)\n", (581, 625), False, 'from guardian.shortcuts import get_objects_for_user\n'), ((747, 782), 'chat.models.Channel.objects.filter', 'Channel.objects.filter', ([], {'public': '(True)'}), '(public=True)\n', (769, 782), False, 'from chat.models import Post, Channel\n'), ((1395, 1472), 'django_filters.CharFilter', 'CharFilter', ([], {'name': '"""name"""', 'lookup_type': '"""icontains"""', 'label': '"""name contain filter"""'}), "(name='name', lookup_type='icontains', label='name contain filter')\n", (1405, 1472), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((1508, 1557), 'django_filters.BooleanFilter', 'BooleanFilter', ([], {'name': '"""public"""', 'label': '"""is public ?"""'}), "(name='public', label='is public ?')\n", (1521, 1557), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((1653, 1715), 'django_filters.DateTimeFilter', 'DateTimeFilter', ([], {'name': '"""date"""', 'lookup_type': '"""gte"""', 'label': 'ca_label'}), "(name='date', lookup_type='gte', label=ca_label)\n", (1667, 1715), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((1847, 1909), 'django_filters.DateTimeFilter', 'DateTimeFilter', ([], {'name': '"""date"""', 'lookup_type': '"""lte"""', 'label': 'ca_label'}), "(name='date', lookup_type='lte', label=ca_label)\n", (1861, 1909), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((2533, 2619), 'django_filters.CharFilter', 'CharFilter', ([], {'name': '"""author"""', 'lookup_type': '"""icontains"""', 'label': '"""author contain filter"""'}), "(name='author', lookup_type='icontains', label=\n 'author contain filter')\n", (2543, 2619), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((2650, 2705), 'django_filters.CharFilter', 'CharFilter', ([], {'name': '"""type"""', 'label': '"""filter on letter value"""'}), "(name='type', label='filter on letter value')\n", (2660, 2705), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((2720, 2805), 'django_filters.CharFilter', 'CharFilter', ([], {'name': '"""type"""', 'lookup_type': '"""icontains"""', 'label': '"""content contain filter"""'}), "(name='type', lookup_type='icontains', label='content contain filter'\n )\n", (2730, 2805), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((2840, 2916), 'django_filters.NumberFilter', 'NumberFilter', ([], {'name': '"""channel"""', 'label': '"""filters posts sent on provided channel"""'}), "(name='channel', label='filters posts sent on provided channel')\n", (2852, 2916), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((2958, 3053), 'django_filters.NumberFilter', 'NumberFilter', ([], {'name': '"""id"""', 'lookup_type': '"""gt"""', 'label': '"""filter posts posted after given post id"""'}), "(name='id', lookup_type='gt', label=\n 'filter posts posted after given post id')\n", (2970, 3053), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((3161, 3222), 'django_filters.DateTimeFilter', 'DateTimeFilter', ([], {'name': '"""date"""', 'lookup_type': '"""gte"""', 'label': 'dflabel'}), "(name='date', lookup_type='gte', label=dflabel)\n", (3175, 3222), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((3306, 3367), 'django_filters.DateTimeFilter', 'DateTimeFilter', ([], {'name': '"""date"""', 'lookup_type': '"""lte"""', 'label': 'dtlabel'}), "(name='date', lookup_type='lte', label=dtlabel)\n", (3320, 3367), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n'), ((3382, 3470), 'django_filters.CharFilter', 'CharFilter', ([], {'name': '"""content"""', 'lookup_type': '"""icontains"""', 'label': '"""content contain filter"""'}), "(name='content', lookup_type='icontains', label=\n 'content contain filter')\n", (3392, 3470), False, 'from django_filters import FilterSet, CharFilter, DateTimeFilter, NumberFilter, BooleanFilter\n')]
|
import uuid
from typing import List, Optional
from todo_api.extensions import db
from todo_api.models import Todo
def create_todo(text: str, user_id: int) -> Todo:
todo = Todo(public_id=uuid.uuid4(), text=text, user_id=user_id)
db.session.add(todo)
db.session.commit()
return todo
def get_all_todos() -> List[Todo]:
return Todo.query.all()
def get_todo_by_public_id(public_id: str) -> Optional[Todo]:
todo = Todo.query.filter_by(public_id=public_id).first()
return todo
def update_todo(public_id: str, text: str, completed: bool) -> Optional[Todo]:
todo = Todo.query.filter_by(public_id=public_id).first()
if not todo:
return None
todo.text = text
todo.completed = completed
db.session.commit()
return todo
def complete_todo(public_id: str) -> Optional[Todo]:
todo = Todo.query.filter_by(public_id=public_id).first()
if not todo:
return None
todo.completed = True
db.session.commit()
return todo
def delete_todo(public_id: str) -> bool:
todo = Todo.query.filter_by(public_id=public_id).first()
if not todo:
return False
db.session.delete(todo)
db.session.commit()
return True
def delete_all_todos() -> int:
todos_deleted = Todo.query.delete()
db.session.commit()
return todos_deleted
|
[
"uuid.uuid4",
"todo_api.extensions.db.session.add",
"todo_api.models.Todo.query.delete",
"todo_api.models.Todo.query.all",
"todo_api.extensions.db.session.commit",
"todo_api.models.Todo.query.filter_by",
"todo_api.extensions.db.session.delete"
] |
[((240, 260), 'todo_api.extensions.db.session.add', 'db.session.add', (['todo'], {}), '(todo)\n', (254, 260), False, 'from todo_api.extensions import db\n'), ((265, 284), 'todo_api.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (282, 284), False, 'from todo_api.extensions import db\n'), ((350, 366), 'todo_api.models.Todo.query.all', 'Todo.query.all', ([], {}), '()\n', (364, 366), False, 'from todo_api.models import Todo\n'), ((746, 765), 'todo_api.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (763, 765), False, 'from todo_api.extensions import db\n'), ((969, 988), 'todo_api.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (986, 988), False, 'from todo_api.extensions import db\n'), ((1154, 1177), 'todo_api.extensions.db.session.delete', 'db.session.delete', (['todo'], {}), '(todo)\n', (1171, 1177), False, 'from todo_api.extensions import db\n'), ((1182, 1201), 'todo_api.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1199, 1201), False, 'from todo_api.extensions import db\n'), ((1272, 1291), 'todo_api.models.Todo.query.delete', 'Todo.query.delete', ([], {}), '()\n', (1289, 1291), False, 'from todo_api.models import Todo\n'), ((1297, 1316), 'todo_api.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1314, 1316), False, 'from todo_api.extensions import db\n'), ((193, 205), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (203, 205), False, 'import uuid\n'), ((441, 482), 'todo_api.models.Todo.query.filter_by', 'Todo.query.filter_by', ([], {'public_id': 'public_id'}), '(public_id=public_id)\n', (461, 482), False, 'from todo_api.models import Todo\n'), ((600, 641), 'todo_api.models.Todo.query.filter_by', 'Todo.query.filter_by', ([], {'public_id': 'public_id'}), '(public_id=public_id)\n', (620, 641), False, 'from todo_api.models import Todo\n'), ((849, 890), 'todo_api.models.Todo.query.filter_by', 'Todo.query.filter_by', ([], {'public_id': 'public_id'}), '(public_id=public_id)\n', (869, 890), False, 'from todo_api.models import Todo\n'), ((1060, 1101), 'todo_api.models.Todo.query.filter_by', 'Todo.query.filter_by', ([], {'public_id': 'public_id'}), '(public_id=public_id)\n', (1080, 1101), False, 'from todo_api.models import Todo\n')]
|
# Authors: <NAME> (lambertt) and <NAME> (odafaluy)
import numpy
import scipy
import scipy.linalg
import plot
class Matrix:
"""
    Provides methods for operations with a Hilbert or a special triangular matrix.
"""
def __init__(self, mtype, dim, dtype):
"""
Initializes the class instance.
:param mtype: The matrix type ("hilbert" or "saite" for triangular)
:param dim: The dimension. Must be > 0.
        :param dtype: The type to use. Can be "float16", "float32" or "float64"
"""
if mtype not in ["saite", "hilbert"]:
raise Exception("Unknown mtype. Allowed are 'hilbert' and 'saite'.")
self.mtype = mtype
if dim <= 0:
raise Exception("dim must be > 0")
self.dim = dim
if dtype not in ["float16", "float32", "float64"]:
raise Exception("Unknown dtype. Allowed are 'float16', 'float32' and 'float64'.")
self.dtype = dtype
self.dtype_constructor = None
self.matrix = None
self.inv = None
self.l = None
self.u = None
self.create_matrix_and_inv()
def create_matrix_and_inv(self):
"""
Calculates the matrix from the values given to the constructor and its inverse.
:return: Nothing.
"""
arr = []
if self.mtype == "saite":
for row in xrange(0, self.dim):
arr.append([])
for col in xrange(0, self.dim):
if row == col:
arr[row].append(2)
elif row - 1 == col or col - 1 == row:
arr[row].append(-1)
else:
arr[row].append(0)
if self.mtype == "hilbert":
arr = scipy.linalg.hilbert(self.dim).tolist()
self.matrix = numpy.array(arr, dtype=self.dtype)
self.inv = scipy.linalg.inv(self.matrix)
def condition(self):
"""
Calculates the condition of the matrix.
:return: The condition of the matrix.
"""
return numpy.linalg.norm(self.matrix, ord=numpy.inf) * numpy.linalg.norm(self.inv, ord=numpy.inf)
def lu(self):
"""
Splits the matrix into l (left lower) and u (right upper) matrices. (Matrix A = LU)
:return: A Tuple l,u of matrices
"""
if self.l is None or self.u is None:
self.l, self.u = scipy.linalg.lu(self.matrix, permute_l=True)
return self.l, self.u
def solve(self, b):
"""
Solves the equation Ax=b for x and the matrix A.
:param b: The vector b to solve the Matrix for.
:return: The vector x from Ax=b.
"""
l, u = self.lu()
x = scipy.linalg.solve_triangular(l, b, lower=True)
x = scipy.linalg.solve_triangular(u, x, lower=False)
return x
def main_31b(mtypes, dims, dtypes):
"""
Executes experiments as described in 3.1B.
:param mtypes: The mtype-values to use.
:param dims: The dimensions to use.
:param dtypes: The dtype-values to use.
:return: Nothing.
"""
for mtype in mtypes:
for dim in dims:
for dtype in dtypes:
print("")
print("Experiment for mtype={0}, dim={1}, dtype={2}".format(mtype, dim, dtype))
identity = numpy.identity(dim, dtype)
matrix = Matrix(mtype, dim, dtype)
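                # M = I - A * A^(-1) would vanish in exact arithmetic; its norm below
                # indicates how accurate the computed inverse is for the chosen dtype.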
m = identity - (numpy.dot(matrix.matrix, matrix.inv))
try:
m_inv = scipy.linalg.inv(m)
except (numpy.linalg.linalg.LinAlgError, ValueError) as ex:
print("Cannot calculate inverse of M: " + ex.message)
continue
condition = numpy.linalg.norm(m, ord=numpy.inf) * numpy.linalg.norm(m_inv, ord=numpy.inf)
print("cond(M) = {1} || I - M M^(-1) || = {0}".format(condition, matrix.condition()))
def main_32b_saite(n):
plot.plot(n)
def main_32b_hilbert(i_max, dtype, n):
"""
Executes experiments as described in 3.2B B. (Hilbert)
:param i_max: The maximum i to use
:param dtype: the data-type to use (float16, float32 or float64)
:param n: The dimension to use.
:return: Nothing.
"""
matrix = Matrix("hilbert", n, dtype)
print("Hilbert Matrix with n={0} and type {1}".format(n, dtype))
result = numpy.identity(n, dtype=dtype)
for i in xrange(1, i_max + 1):
result = numpy.dot(result, matrix.matrix)
print("i = {0}, x^{0} = ".format(i))
print(result)
def main_32b(dtypes, n_iterable, i_iterable):
"""
Executes experiments as described in 3.2B
:param dtypes: the data-type to use (float16, float32 or float64)
:param n_iterable: The n-values to use.
:param i_iterable: The i-values to use. (if i>n it will be ignored).
:return: Nothing.
"""
for dtype in dtypes:
for n in n_iterable:
for i_max in i_iterable:
if i_max > n:
continue
main_32b_hilbert(i_max, dtype, n)
def main(experiment, mtypes=None, dims=None, dtypes=None, n_iterable=None, i_iterable=None):
"""
Executes experiments as described.
See start.py for more information.
:return: Nothing.
"""
if experiment == "3.1B":
main_31b(mtypes, dims, dtypes)
elif experiment == "3.2B - A":
for n in n_iterable:
main_32b_saite(n)
elif experiment == "3.2B - B":
main_32b(dtypes, n_iterable, i_iterable)
else:
print("Unknown experiment")
|
[
"scipy.linalg.hilbert",
"scipy.linalg.solve_triangular",
"numpy.identity",
"scipy.linalg.lu",
"scipy.linalg.inv",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"plot.plot"
] |
[((4016, 4028), 'plot.plot', 'plot.plot', (['n'], {}), '(n)\n', (4025, 4028), False, 'import plot\n'), ((4435, 4465), 'numpy.identity', 'numpy.identity', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (4449, 4465), False, 'import numpy\n'), ((1860, 1894), 'numpy.array', 'numpy.array', (['arr'], {'dtype': 'self.dtype'}), '(arr, dtype=self.dtype)\n', (1871, 1894), False, 'import numpy\n'), ((1914, 1943), 'scipy.linalg.inv', 'scipy.linalg.inv', (['self.matrix'], {}), '(self.matrix)\n', (1930, 1943), False, 'import scipy\n'), ((2762, 2809), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['l', 'b'], {'lower': '(True)'}), '(l, b, lower=True)\n', (2791, 2809), False, 'import scipy\n'), ((2822, 2870), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['u', 'x'], {'lower': '(False)'}), '(u, x, lower=False)\n', (2851, 2870), False, 'import scipy\n'), ((4518, 4550), 'numpy.dot', 'numpy.dot', (['result', 'matrix.matrix'], {}), '(result, matrix.matrix)\n', (4527, 4550), False, 'import numpy\n'), ((2104, 2149), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.matrix'], {'ord': 'numpy.inf'}), '(self.matrix, ord=numpy.inf)\n', (2121, 2149), False, 'import numpy\n'), ((2152, 2194), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.inv'], {'ord': 'numpy.inf'}), '(self.inv, ord=numpy.inf)\n', (2169, 2194), False, 'import numpy\n'), ((2446, 2490), 'scipy.linalg.lu', 'scipy.linalg.lu', (['self.matrix'], {'permute_l': '(True)'}), '(self.matrix, permute_l=True)\n', (2461, 2490), False, 'import scipy\n'), ((3373, 3399), 'numpy.identity', 'numpy.identity', (['dim', 'dtype'], {}), '(dim, dtype)\n', (3387, 3399), False, 'import numpy\n'), ((1797, 1827), 'scipy.linalg.hilbert', 'scipy.linalg.hilbert', (['self.dim'], {}), '(self.dim)\n', (1817, 1827), False, 'import scipy\n'), ((3483, 3519), 'numpy.dot', 'numpy.dot', (['matrix.matrix', 'matrix.inv'], {}), '(matrix.matrix, matrix.inv)\n', (3492, 3519), False, 'import numpy\n'), ((3571, 3590), 'scipy.linalg.inv', 'scipy.linalg.inv', (['m'], {}), '(m)\n', (3587, 3590), False, 'import scipy\n'), ((3799, 3834), 'numpy.linalg.norm', 'numpy.linalg.norm', (['m'], {'ord': 'numpy.inf'}), '(m, ord=numpy.inf)\n', (3816, 3834), False, 'import numpy\n'), ((3837, 3876), 'numpy.linalg.norm', 'numpy.linalg.norm', (['m_inv'], {'ord': 'numpy.inf'}), '(m_inv, ord=numpy.inf)\n', (3854, 3876), False, 'import numpy\n')]
|
import pygame
import random
from toast.quadtree import QuadTree
from toast.scene_graph import GameObject, Scene
from toast.camera import Camera
from toast.event_manager import EventManager
from examples.demo_game import DemoGame
class QuadTreeVisualizer(GameObject):
def __init__(self, quadtree):
super(QuadTreeVisualizer, self).__init__()
self.quadtree = quadtree
def render(self, surface, offset=(0,0)):
self.render_quadtree(surface, self.quadtree)
def render_quadtree(self, surface, quadtree):
pygame.draw.rect(surface, (255,0,0), quadtree.quadrant, 1)
if quadtree.northwest_tree is not None:
self.render_quadtree(surface, quadtree.northwest_tree)
if quadtree.northeast_tree is not None:
self.render_quadtree(surface, quadtree.northeast_tree)
if quadtree.southwest_tree is not None:
self.render_quadtree(surface, quadtree.southwest_tree)
if quadtree.southeast_tree is not None:
self.render_quadtree(surface, quadtree.southeast_tree)
        if quadtree.bucket:
for item in quadtree.bucket:
item.render(surface)
class RectComponent(GameObject):
def __init__(self, left, top, width, height):
super(RectComponent, self).__init__()
self.left = left
self.top = top
self.width = width
self.height = height
def __getitem__(self, index):
if index == 0:
return self.left
if index == 1:
return self.top
if index == 2:
return self.width
if index == 3:
return self.height
def render(self, surface, offset=(0,0)):
rect = self.left, self.top, self.width, self.height
pygame.draw.rect(surface, (255,255,255), rect, 1)
class NewScene(Scene):
def __init__(self):
super(NewScene, self).__init__()
EventManager.subscribe(self, 'onMouseDown')
Camera.current_camera.viewport = 512, 512
Camera.current_camera.position = 256, 256
w = h = 2**9
region = (0,0,w,h)
self.quadtree = QuadTree([], region)
self.add(QuadTreeVisualizer(self.quadtree))
def onMouseDown(self, event):
        if event.button == 1:
p = DemoGame.camera_to_world(event.pos)
d = 2 ** random.randint(1,5)
self.quadtree.insert(RectComponent(p[0], p[1], d, d))
game = DemoGame((512, 512), NewScene)
game.run()
|
[
"random.randint",
"pygame.draw.rect",
"examples.demo_game.DemoGame",
"examples.demo_game.DemoGame.camera_to_world",
"toast.quadtree.QuadTree",
"toast.event_manager.EventManager.subscribe"
] |
[((2592, 2622), 'examples.demo_game.DemoGame', 'DemoGame', (['(512, 512)', 'NewScene'], {}), '((512, 512), NewScene)\n', (2600, 2622), False, 'from examples.demo_game import DemoGame\n'), ((571, 631), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', '(255, 0, 0)', 'quadtree.quadrant', '(1)'], {}), '(surface, (255, 0, 0), quadtree.quadrant, 1)\n', (587, 631), False, 'import pygame\n'), ((1850, 1901), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', '(255, 255, 255)', 'rect', '(1)'], {}), '(surface, (255, 255, 255), rect, 1)\n', (1866, 1901), False, 'import pygame\n'), ((2014, 2057), 'toast.event_manager.EventManager.subscribe', 'EventManager.subscribe', (['self', '"""onMouseDown"""'], {}), "(self, 'onMouseDown')\n", (2036, 2057), False, 'from toast.event_manager import EventManager\n'), ((2257, 2277), 'toast.quadtree.QuadTree', 'QuadTree', (['[]', 'region'], {}), '([], region)\n', (2265, 2277), False, 'from toast.quadtree import QuadTree\n'), ((2428, 2463), 'examples.demo_game.DemoGame.camera_to_world', 'DemoGame.camera_to_world', (['event.pos'], {}), '(event.pos)\n', (2452, 2463), False, 'from examples.demo_game import DemoGame\n'), ((2498, 2518), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (2512, 2518), False, 'import random\n')]
|
"""empty message
Revision ID: c1ca0249cb60
Revises: 0b986a10b559
Create Date: 2020-01-07 08:36:09.067866
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c1ca0249cb60'
down_revision = '0b986a10b559'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tiles', sa.Column('max_y', sa.Float(), nullable=True))
op.add_column('tiles', sa.Column('min_x', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tiles', 'min_x')
op.drop_column('tiles', 'max_y')
# ### end Alembic commands ###
|
[
"alembic.op.drop_column",
"sqlalchemy.Float"
] |
[((652, 684), 'alembic.op.drop_column', 'op.drop_column', (['"""tiles"""', '"""min_x"""'], {}), "('tiles', 'min_x')\n", (666, 684), False, 'from alembic import op\n'), ((689, 721), 'alembic.op.drop_column', 'op.drop_column', (['"""tiles"""', '"""max_y"""'], {}), "('tiles', 'max_y')\n", (703, 721), False, 'from alembic import op\n'), ((426, 436), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (434, 436), True, 'import sqlalchemy as sa\n'), ((500, 510), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (508, 510), True, 'import sqlalchemy as sa\n')]
|
import unittest
from testutils import getZserioApi
class FullConstTypeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "expressions.zs").full_const_type
def testBitSizeOfWithOptional(self):
fullConstTypeExpression = self.api.FullConstTypeExpression(self.FULL_VALID_VALUE,
self.FULL_ADDITIONAL_VALUE)
self.assertEqual(self.FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITH_OPTIONAL,
fullConstTypeExpression.bitsizeof())
def testBitSizeOfWithoutOptional(self):
fullConstTypeExpression = self.api.FullConstTypeExpression()
fullConstTypeExpression.value = self.FULL_INVALID_VALUE
self.assertEqual(self.FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITHOUT_OPTIONAL,
fullConstTypeExpression.bitsizeof())
FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITH_OPTIONAL = 10
FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITHOUT_OPTIONAL = 7
FULL_VALID_VALUE = 0x01
FULL_INVALID_VALUE = 0x00
FULL_ADDITIONAL_VALUE = 0x03
|
[
"testutils.getZserioApi"
] |
[((157, 197), 'testutils.getZserioApi', 'getZserioApi', (['__file__', '"""expressions.zs"""'], {}), "(__file__, 'expressions.zs')\n", (169, 197), False, 'from testutils import getZserioApi\n')]
|
from __future__ import annotations
from dataclasses import dataclass
import logging
from typing import Tuple, Optional
from typing_extensions import Literal
log = logging.getLogger(__name__)
GripperName = Literal["gripper"]
GripperModel = Literal["gripper_v1"]
DEFAULT_GRIPPER_CALIBRATION_OFFSET = [0.0, 0.0, 0.0]
@dataclass(frozen=True)
class GripperConfig:
gripper_offset: Tuple[float, float, float]
gripper_current: float
display_name: str
name: GripperName
max_travel: float
home_position: float
steps_per_mm: float
idle_current: float
model: GripperModel
DUMMY_GRIPPER_CONFIG = GripperConfig(
gripper_offset=(0.0, 0.0, 0.0),
gripper_current=1.0,
display_name="dummy_gripper",
name="gripper",
max_travel=50.0,
home_position=0.0,
steps_per_mm=480.0,
idle_current=0.2,
model="gripper_v1",
)
def load(
gripper_model: Optional[int] = None, gripper_id: Optional[int] = None
) -> GripperConfig:
return DUMMY_GRIPPER_CONFIG # TODO: load actual gripper config
|
[
"dataclasses.dataclass",
"logging.getLogger"
] |
[((164, 191), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (181, 191), False, 'import logging\n'), ((320, 342), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (329, 342), False, 'from dataclasses import dataclass\n')]
|
import matplotlib.pyplot as plt
import pandas as pd
from app.requester import ExchangeRateRequester
class Grapher:
def __init__(self) -> None:
self.exchange_rate_requester = ExchangeRateRequester()
def _create_historical_rates_df(self, transactions) -> pd.DataFrame:
assert transactions, "There are no transactions"
first_transaction = transactions[0].date
historical_rates_date = self.exchange_rate_requester.get_historical_bids(first_transaction)
historical_rates_df = pd.DataFrame(list(historical_rates_date.items()), columns=["date", "rate"])
return historical_rates_df
@staticmethod
def run_calculations(operations_df: pd.DataFrame) -> pd.DataFrame:
operations_df["transaction[+/-]"] = operations_df["transaction[+/-]"].fillna(0)
operations_df["portfolio_value"] = operations_df["transaction[+/-]"].cumsum()
operations_df["transaction_rate"] = operations_df["transaction_rate"].fillna(method="backfill")
operations_df["value_pln_temp"] = operations_df["portfolio_value"] * operations_df["rate"]
operations_df["value_pln_after_transaction"] = (
operations_df["portfolio_value"] * operations_df["transaction_rate"]
)
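        # Profit in percent: current PLN value of the portfolio relative to its PLN value
        # at the back-filled transaction rate (reading inferred from the column names above).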
operations_df["profit"] = (
operations_df["value_pln_temp"] / operations_df["value_pln_after_transaction"] - 1
) * 100
return operations_df
def _create_operations_df(self, transactions: list) -> pd.DataFrame:
assert transactions, "There are no transactions"
operations_df = self._create_historical_rates_df(transactions)
transactions_df = pd.DataFrame(transactions, columns=["date", "transaction[+/-]", "transaction_rate"])
operations_df = pd.merge(operations_df, transactions_df, on="date", how="outer")
calculated_operations_df = self.run_calculations(operations_df)
return calculated_operations_df
def plot_historical_rates(self, historical_rates: pd.DataFrame) -> None:
historical_rates.plot(x="date", y="rate")
plt.grid()
plt.xlabel("date")
plt.xticks(rotation=45)
plt.ylabel("rate")
plt.title("Historical rates [PLN]")
plt.tight_layout()
plt.show()
def plot_portfolio_value_pln(self, operations: pd.DataFrame) -> None:
operations.plot(x="date", y="value_pln_temp")
plt.grid()
plt.xlabel("date")
plt.xticks(rotation=55)
plt.ylabel("value")
plt.title("Historical portfolio value [PLN]")
plt.tight_layout()
plt.show()
def plot_profit(self, operations: pd.DataFrame) -> None:
operations.plot(x="date", y="profit")
plt.grid()
plt.xlabel("date")
plt.xticks(rotation=45)
plt.ylabel("profit")
plt.title("Historical portfolio profit [%]")
plt.tight_layout()
plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"app.requester.ExchangeRateRequester",
"pandas.merge",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((189, 212), 'app.requester.ExchangeRateRequester', 'ExchangeRateRequester', ([], {}), '()\n', (210, 212), False, 'from app.requester import ExchangeRateRequester\n'), ((1658, 1746), 'pandas.DataFrame', 'pd.DataFrame', (['transactions'], {'columns': "['date', 'transaction[+/-]', 'transaction_rate']"}), "(transactions, columns=['date', 'transaction[+/-]',\n 'transaction_rate'])\n", (1670, 1746), True, 'import pandas as pd\n'), ((1767, 1831), 'pandas.merge', 'pd.merge', (['operations_df', 'transactions_df'], {'on': '"""date"""', 'how': '"""outer"""'}), "(operations_df, transactions_df, on='date', how='outer')\n", (1775, 1831), True, 'import pandas as pd\n'), ((2081, 2091), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2089, 2091), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2118), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (2110, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2150), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2137, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""rate"""'], {}), "('rate')\n", (2169, 2177), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2221), 'matplotlib.pyplot.title', 'plt.title', (['"""Historical rates [PLN]"""'], {}), "('Historical rates [PLN]')\n", (2195, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2230, 2248), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2246, 2248), True, 'import matplotlib.pyplot as plt\n'), ((2257, 2267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2265, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2415), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2413, 2415), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2442), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (2434, 2442), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2474), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(55)'}), '(rotation=55)\n', (2461, 2474), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (2493, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2556), 'matplotlib.pyplot.title', 'plt.title', (['"""Historical portfolio value [PLN]"""'], {}), "('Historical portfolio value [PLN]')\n", (2520, 2556), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2583), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2581, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2602), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2600, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2729), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2727, 2729), True, 'import matplotlib.pyplot as plt\n'), ((2738, 2756), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (2748, 2756), True, 'import matplotlib.pyplot as plt\n'), ((2765, 2788), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2775, 2788), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""profit"""'], {}), "('profit')\n", (2807, 2817), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2870), 'matplotlib.pyplot.title', 'plt.title', (['"""Historical portfolio profit [%]"""'], {}), "('Historical portfolio profit [%]')\n", (2835, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2897), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2895, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2914, 2916), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""Workflow encapsulation package for performing actions using the Tanium API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import time
from collections import OrderedDict
from . import exceptions
from .. import utils
from .. import results
class Workflow(object):
def __init__(self, adapter, obj, lvl="info", result=None):
"""Constructor.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
obj (:obj:`tantrum.api_models.ApiModel`):
API Object to use for this workflow.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
result (:obj:`tantrum.results.Result`, optional):
Result object that ``obj`` was generated from.
Defaults to: None.
"""
self._lvl = lvl
self.log = utils.logs.get_obj_log(obj=self, lvl=lvl)
self.obj = obj
self.adapter = adapter
self._result = result
self._last_result = result
def __repr__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
return self.__str__()
@property
def api_objects(self):
return self.adapter.api_objects
class Clients(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
bits = ["count={}".format(len(self.obj))]
bits = "({})".format(", ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@staticmethod
def build_last_reg_filter(
adapter, last_reg=300, operator="greaterequal", not_flag=False, filters=None
):
"""Build a set of filters to be used in :meth:`Clients.get_all.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
last_reg (:obj:`int`, optional):
Only return clients that have registered in N number of seconds.
Defaults to: 300.
operator (:obj:`str`, optional):
Defines how the last_registered attribute of each client status is
compared against the value in last_reg.
Must be one of :data:`OPERATOR_MAP`.
Defaults to: "greaterequal".
not_flag (:obj:`int`, optional):
                If True, have the API return all rows that do not match the operator.
                Defaults to: False.
filters (:obj:`object`, optional):
If a CacheFilterList object is supplied, the last_registration filter
generated by this method will be appended to it. If this is None,
a new CacheFilterList will be created with the last_registration filter
being the only item in it.
Defaults to: None.
Returns:
:obj:`Clients`
"""
op_dict = get_operator_map(operator)
now_dt = datetime.datetime.utcnow()
ago_td = datetime.timedelta(seconds=-(int(last_reg)))
ago_dt = now_dt + ago_td
ago_str = ago_dt.strftime(adapter.api_objects.module_dt)
cfilter = adapter.api_objects.CacheFilter(
field="last_registration",
type="Date",
operator=op_dict["op"],
not_flag=not_flag,
value=ago_str,
)
filters = filters or adapter.api_objects.CacheFilterList()
filters.append(cfilter)
return filters
@classmethod
def get_all_iter(
cls,
adapter,
filters=None,
sort_fields="last_registration",
page_size=1000,
max_page_count=0,
cache_expiration=600,
sleep=2,
lvl="info",
**kwargs
):
"""Get all Clients as an iterator.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
filters (:obj:`object`, optional):
Tantrum CacheFilterList returned from
:meth:`Clients.build_last_reg_filter`.
Defaults to: None.
sort_fields (:obj:`str`, optional):
Attribute of a ClientStatus object to have API sort the return on.
Defaults to: "last_registration".
page_size (:obj:`int`, optional):
Get N number of clients at a time from the API.
If 0, disables paging and gets all clients in one call.
Defaults to: 1000.
max_page_count (:obj:`int`, optional):
Only fetch up to this many pages. If 0, get all pages.
Defaults to: 0.
cache_expiration (:obj:`int`, optional):
When page_size is not 0, have the API keep the cache of clients
for this many seconds before expiring the cache.
Defaults to: 600.
sleep (:obj:`int`, optional):
Wait N seconds between fetching each page.
Defaults to: 2.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get`.
Yields:
:obj:`tantrum.api_objects.ApiObjects`: ClientStatus API object
"""
log = utils.logs.get_obj_log(obj=cls, lvl=lvl)
get_args = {}
get_args.update(kwargs)
get_args["cache_sort_fields"] = sort_fields
get_args["obj"] = adapter.api_objects.ClientStatus()
row_start = 0
row_count = page_size
if filters is not None:
get_args["cache_filters"] = filters
if page_size:
get_args["row_start"] = row_start
get_args["row_count"] = row_count
get_args["cache_expiration"] = cache_expiration
result = adapter.cmd_get(**get_args)
result_obj = result()
received_rows = len(result_obj)
result_cache = getattr(result_obj, "cache_info", None)
total_rows = getattr(result_cache, "filtered_row_count", 0)
cache_id = getattr(result_cache, "cache_id", None)
get_args["cache_id"] = cache_id
page_count = 1
m = "Received initial page length={len}, cache_info={cache!r}"
m = m.format(len=received_rows, cache=result_cache)
log.info(m)
for obj in result_obj:
yield obj
if page_size:
paging_get_args = {k: v for k, v in get_args.items()}
while True:
if max_page_count and page_count >= max_page_count:
m = "Reached max page count {c}, considering all clients fetched"
m = m.format(c=max_page_count)
log.info(m)
break
if received_rows >= total_rows:
m = "Reached total rows count {c}, considering all clients fetched"
m = m.format(c=total_rows)
log.info(m)
break
page_count += 1
row_start += row_count
paging_get_args["row_start"] = row_start
paging_result = adapter.cmd_get(**paging_get_args)
log.debug(result.pretty_bodies())
paging_result_obj = paging_result()
page_rows = len(paging_result_obj)
received_rows += page_rows
m = [
"Received page_rows={page_rows}",
"received_rows={received_rows}",
"total_rows={total_rows}",
]
m = ", ".join(m)
m = m.format(
page_rows=page_rows,
received_rows=received_rows,
total_rows=total_rows,
)
log.info(m)
for obj in paging_result_obj:
yield obj
time.sleep(sleep)
@classmethod
def get_all(cls, adapter, **kwargs):
"""Get all Clients.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
**kwargs:
rest of kwargs:
Passed to :meth:`Clients.get_all_iter`.
Returns:
:obj:`tantrum.api_objects.ApiObjects`: SystemStatusList API object
"""
obj = adapter.api_objects.SystemStatusList()
for client_obj in cls.get_all_iter(adapter=adapter, **kwargs):
obj.append(client_obj)
return obj
class Sensor(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
bits = [
"name={!r}".format(self.obj.name),
"filter={}".format(", ".join(self.filter_vals)),
]
if self.params_defined or self.param_values:
bits += [
"params_defined={}".format(list(self.params_defined.keys())),
"param_values={}".format(list(self.param_values.items())),
]
bits = "({})".format(", ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@classmethod
def get_by_name(cls, adapter, name, lvl="info"):
"""Get a sensor object by name.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
name (:obj:`str`):
Name of sensor to fetch.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Sensor`
"""
result = adapter.cmd_get(obj=adapter.api_objects.Sensor(name=name))
return cls(adapter=adapter, obj=result(), lvl=lvl, result=result)
@classmethod
def get_by_id(cls, adapter, id, lvl="info"):
"""Get a sensor object by id.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
id (:obj:`int`):
id of sensor to fetch.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Sensor`
"""
result = adapter.cmd_get(obj=adapter.api_objects.Sensor(id=id))
return cls(adapter=adapter, obj=result(), lvl=lvl, result=result)
@property
def params_defined(self):
"""Get the parameter definitions for this sensor.
Notes:
Will try to resolve a default value and store it in "derived_default" key
for each parameter definition returned.
Returns:
:obj:`collections.OrderedDict`
"""
param_defs = json.loads(self.obj.parameter_definition or "{}")
params = param_defs.get("parameters", [])
for p in params:
pdef = p.get("defaultValue", "")
pval = p.get("value", "")
pvals = p.get("values", [])
if pdef not in ["", None]:
derived_default = pdef
elif pval not in ["", None]:
derived_default = pval
elif pvals:
derived_default = pvals[0]
else:
derived_default = ""
p["derived_default"] = derived_default
return OrderedDict((p["key"], p) for p in params)
@property
def param_values(self):
"""Get all of the parameter key and values.
Returns:
:obj:`OrderedDict`
"""
ret = OrderedDict()
for k in self.params_defined:
ret[k] = ""
for p in self.params:
ret[p.key] = p.value
return ret
@property
def params(self):
"""Get the parameters that are set for this sensor.
Returns:
:obj:`tantrum.api_objects.ApiObjects`: ParameterList API object
"""
if not hasattr(self, "_params"):
self._params = self.api_objects.ParameterList()
return self._params
def set_parameter(
self, key, value="", derive_default=True, delim="||", allow_undefined=True
):
"""Set a parameters value for this sensor.
Args:
key (:obj:`str`):
Key name of parameter to set.
value (:obj:`str`, optional):
Value of parameter to set.
Defaults to: "".
derive_default (:obj:`bool`, optional):
Get default value from parameter definition if value is "".
Defaults to: True.
delim (:obj:`str`, optional):
String to put before and after parameter key name when sending to API.
Defaults to: "||".
allow_undefined (:obj:`bool`, optional):
Allow parameter keys that are not in the parameters definition
for this sensor to be set.
Throws exception if False and key not in :attr:`Sensor.param_keys`.
Defaults to: True.
"""
param_def = self.params_defined.get(key, None)
if param_def is None:
m = "Parameter key {o!r} is not one of the defined parameters {ov}"
m = m.format(o=key, ov=list(self.params_defined.keys()))
if allow_undefined:
self.log.info(m)
else:
raise exceptions.ModuleError(m)
elif derive_default and value == "":
value = param_def.get("derived_default", "")
key_delim = "{d}{key}{d}".format(d=delim, key=key)
param = self.api_objects.Parameter(key=key_delim, value=value)
self.params.append(param)
@property
def filter(self):
"""Get the filter for this sensor.
Returns:
:obj:`tantrum.api_objects.ApiObjects`: Filter API object
"""
if not hasattr(self, "_filter"):
self._filter = self.api_objects.Filter()
self._filter.sensor = self.api_objects.Sensor()
self._filter.sensor.hash = self.obj.hash
return self._filter
@property
def filter_vals(self):
"""Get the key value pairs of the filter for this sensor.
Returns:
:obj:`list` of :obj:`str`
"""
if any([self.filter.value, self.filter.operator]):
keys = [
"operator",
"value",
"ignore_case_flag",
"not_flag",
"all_values_flag",
"max_age_seconds",
"value_type",
]
vals = ["{}: {!r}".format(k, getattr(self.filter, k)) for k in keys]
else:
vals = []
return vals
def set_filter(
self,
value,
operator="regex",
ignore_case_flag=True,
not_flag=False,
all_values_flag=False,
max_age_seconds=0,
type=None,
):
"""Set a filter for this sensor to be used in a question.
Args:
value (:obj:`str`):
Filter sensor rows returned on this value.
operator (:obj:`str`, optional):
Operator to use for filter_value.
Must be one of :data:`OPERATOR_MAP`.
Defaults to: "regex".
ignore_case_flag (:obj:`bool`, optional):
Ignore case when filtering on value.
Defaults to: True.
not_flag (:obj:`bool`, optional):
If set, negate the match.
Defaults to: False.
max_age_seconds (:obj:`int`, optional):
How old a sensor result can be before we consider it invalid.
0 means to use the max age property of the sensor.
Defaults to: 0.
all_values_flag (:obj:`bool`, optional):
Have filter match all values instead of any value.
Defaults to: False.
type (:obj:`str`, optional):
Have filter consider the value type as this.
Must be one of :data:`TYPE_MAP`
Defaults to: None.
"""
op_dict = get_operator_map(operator)
if type:
get_type_map(type)
self.filter.value = op_dict["tmpl"].format(value=value)
self.filter.operator = op_dict["op"]
self.filter.ignore_case_flag = ignore_case_flag
self.filter.not_flag = not_flag
self.filter.all_values_flag = all_values_flag
self.filter.max_age_seconds = max_age_seconds
self.filter.value_type = type
def build_select(self, set_param_defaults=True, allow_empty_params=False):
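        """Build a Select API object for this sensor for use in a question.
        Args:
            set_param_defaults (:obj:`bool`, optional):
                Derive default values for defined parameters that have no value set.
                Defaults to: True.
            allow_empty_params (:obj:`bool`, optional):
                Allow parameters with empty values instead of raising an exception.
                Defaults to: False.
        Returns:
            :obj:`tantrum.api_objects.ApiObjects`: Select API object
        """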
select = self.api_objects.Select()
select.filter = self.filter
select.sensor = self.api_objects.Sensor()
for key in self.params_defined:
if key not in self.param_values and set_param_defaults:
self.set_parameter(key=key, derive_default=True)
for param in self.params:
if param.value in ["", None] and not allow_empty_params:
m = "Parameter {p.key!r} value {p.value!r} is empty, definition: {d}"
m = m.format(p=param, d=self.params_defined.get(key, None))
raise exceptions.ModuleError(m)
if self.params:
select.sensor.parameters = self.params
select.sensor.source_id = self.obj.id
select.filter.sensor.id = self.obj.id
else:
select.sensor.hash = self.obj.hash
select.WORKFLOW = self
return select
class Question(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
atmpl = "{k}='{v}'".format
attrs = ["id", "query_text"]
bits = [atmpl(k=attr, v=getattr(self.obj, attr, None)) for attr in attrs]
bits += [atmpl(k=k, v=v) for k, v in self.expiration.items()]
bits = "(\n {},\n)".format(",\n ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@classmethod
def new(cls, adapter, lvl="info"):
"""Create a new Question workflow.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Question`
"""
return cls(obj=adapter.api_objects.Question(), adapter=adapter, lvl=lvl)
@classmethod
def get_by_id(cls, adapter, id, lvl="info"):
"""Get a question object by id.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
id (:obj:`int`):
id of question to fetch.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Question`
"""
result = adapter.cmd_get(obj=adapter.api_objects.Question(id=id))
return cls(adapter=adapter, obj=result(), lvl=lvl, result=result)
def _check_id(self):
"""Check that question has been asked by seeing if self.obj.id is set."""
if not self.obj.id:
m = "No id issued yet, ask the question!"
raise exceptions.ModuleError(m)
@property
def expiration(self):
"""Get expiration details for this question.
Returns:
:obj:`dict`
"""
now_dt = datetime.datetime.utcnow()
now_td = datetime.timedelta()
ret = {
"expiration": now_dt,
"expire_in": now_td,
"expire_ago": now_td,
"expired": True,
}
if self.obj.expiration:
ex_dt = self.api_objects.module_dt_format(self.obj.expiration)
is_ex = now_dt >= ex_dt
ret["expiration"] = ex_dt
ret["expired"] = is_ex
if is_ex:
ret["expire_ago"] = now_dt - ex_dt
else:
ret["expire_in"] = ex_dt - now_dt
return ret
def refetch(self):
"""Re-fetch this question."""
self._check_id()
result = self.adapter.cmd_get(obj=self.obj)
self._last_result = result
self.obj = result()
def ask(self, **kwargs):
"""Ask the question.
Args:
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_add`.
Notes:
If question has already been asked (id is set), we wipe out attrs:
["id", "context_group", "management_rights_group"], then add it.
"""
if self.obj.id:
wipe_attrs = ["id", "context_group", "management_rights_group"]
for attr in wipe_attrs:
setattr(self.obj, attr, None)
result = self.adapter.cmd_add(obj=self.obj, **kwargs)
self._last_result = result
self.obj = result()
self.refetch()
def add_left_sensor(
self, sensor, set_param_defaults=True, allow_empty_params=False
):
"""Add a sensor to the left hand side of the question.
Args:
sensor (:obj:`Sensor`):
Sensor workflow object.
set_param_defaults (:obj:`bool`, optional):
If sensor has parameters defined, and no value is set,
try to derive the default value from each parameters definition.
Defaults to: True.
            allow_empty_params (:obj:`bool`, optional):
                If False and the sensor has parameters whose value is not set,
                "", or None, throw an exception.
                Defaults to: False.
"""
select = sensor.build_select(
set_param_defaults=set_param_defaults, allow_empty_params=allow_empty_params
)
if not getattr(self.obj, "selects", None):
self.obj.selects = self.api_objects.SelectList()
self.obj.selects.append(select)
def answers_get_info(self, **kwargs):
"""Return the ResultInfo for this question.
Args:
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_info`.
Returns:
:obj:`tantrum.api_models.ApiModel`: ResultInfoList API Object
"""
self._check_id()
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
result = self.adapter.cmd_get_result_info(**cmd_args)
self._last_result = result
infos = result()
self._last_infos = infos
m = "Received answers info: {infos}"
m = m.format(infos=infos.serialize())
self.log.debug(m)
self.log.debug(format(self))
return infos
def answers_poll(
self,
poll_pct=99,
poll_secs=0,
poll_total=0,
poll_sleep=5,
max_poll_count=0,
**kwargs
):
"""Poll for answers from clients for this question.
Args:
poll_sleep (:obj:`int`, optional):
Check for answers every N seconds.
Defaults to: 5.
            poll_pct (:obj:`int`, optional):
                Wait until the percentage of clients that have answered reaches
                N percent.
                Defaults to: 99.
            poll_secs (:obj:`int`, optional):
                If not 0, wait at most N seconds for poll_pct of clients to answer
                instead of waiting until question expiration.
                Defaults to: 0.
            poll_total (:obj:`int`, optional):
                If not 0, wait until N clients have answered instead of the
                ``estimated_total`` of clients from the API.
                Defaults to: 0.
max_poll_count (:obj:`int`, optional):
If not 0, only poll N times.
Defaults to: 0.
**kwargs:
rest of kwargs:
Passed to :meth:`answers_get_info`.
Returns:
:obj:`object`: ResultInfoList API object
"""
# TODO: Add wait till error_count / no_results_count == 0
self._check_id()
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
start = datetime.datetime.utcnow()
if poll_secs:
stop_dt = start + datetime.timedelta(seconds=poll_secs)
else:
stop_dt = self.expiration["expiration"]
m = "Start polling loop for answers until for {o} until {stop_dt}"
m = m.format(o=self, stop_dt=stop_dt)
self.log.debug(m)
infos = self.answers_get_info(**kwargs)
info = infos[0]
        est_total = info.estimated_total
        # default to the estimated total from the API; honor poll_total only when it
        # is a non-zero value no larger than the estimate
        this_total = est_total
        if poll_total and poll_total <= est_total:
            this_total = poll_total
now_pct = utils.tools.calc_percent(part=info.mr_passed, whole=this_total)
poll_count = 0
while True:
poll_count += 1
m = "New polling loop #{c} for {o}"
m = m.format(c=poll_count, o=self)
self.log.debug(m)
if now_pct >= poll_pct:
m = "Reached {now_pct} out of {pct}, considering all answers in"
m = m.format(now_pct=PCT_FMT(now_pct), pct=PCT_FMT(poll_pct))
self.log.info(m)
break
if datetime.datetime.utcnow() >= stop_dt:
m = "Reached stop_dt {stop_dt}, considering all answers in"
m = m.format(stop_dt=stop_dt)
self.log.info(m)
break
if self.expiration["expired"]:
m = "Reached expiration {expiration}, considering all answers in"
m = m.format(expiration=self.expiration)
self.log.info(m)
break
if max_poll_count and poll_count >= max_poll_count:
m = "Reached max poll count {c}, considering all answers in"
m = m.format(c=max_poll_count)
self.log.info(m)
break
infos = self.answers_get_info(**kwargs)
info = infos[0]
now_pct = utils.tools.calc_percent(part=info.mr_passed, whole=this_total)
m = [
"Answers in {now_pct} out of {pct}",
"{info.mr_passed} out of {this_total}",
"estimated_total: {info.estimated_total}",
"poll count: {c}",
]
m = ", ".join(m)
m = m.format(
now_pct=PCT_FMT(now_pct),
pct=PCT_FMT(poll_pct),
info=info,
this_total=this_total,
c=poll_count,
)
self.log.info(m)
time.sleep(poll_sleep)
end = datetime.datetime.utcnow()
elapsed = end - start
m = [
"Finished polling in: {dt}",
"clients answered: {info.mr_passed}",
"estimated clients: {info.estimated_total}",
"rows in answers: {info.row_count}",
"poll count: {c}",
]
m = ", ".join(m)
m = m.format(dt=elapsed, info=info, c=poll_count)
self.log.info(m)
return infos
def answers_get_data(self, hashes=False, **kwargs):
"""Get the answers for this question.
Args:
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values.
Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Notes:
This will not use any paging, which means ALL answers will be returned
in one API response. For large data sets of answers, this is unwise.
Returns:
:obj:`tantrum.api_models.ApiModel`: ResultDataList API Object
"""
self._check_id()
start = datetime.datetime.utcnow()
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
end = datetime.datetime.utcnow()
elapsed = end - start
m = "Finished getting answers in {dt}"
m = m.format(dt=elapsed)
self.log.info(m)
datas = result()
self._last_datas = datas
return datas
def answers_get_data_paged(
self,
page_size=1000,
max_page_count=0,
max_row_count=0,
cache_expiration=900,
hashes=False,
sleep=5,
**kwargs
):
"""Get the answers for this question one page at a time.
Args:
page_size (:obj:`int`, optional):
Size of each page to fetch at a time.
Defaults to: 1000.
max_page_count (:obj:`int`, optional):
Only fetch up to this many pages. If 0, get all pages.
Defaults to: 0.
max_row_count (:obj:`int`, optional):
Only fetch up to this many rows.
Defaults to: 0.
cache_expiration (:obj:`int`, optional):
Have the API keep the cache_id that is created on initial get
answers page alive for N seconds.
Defaults to: 900.
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values
Defaults to: False.
sleep (:obj:`int`, optional):
Wait N seconds between fetching each page.
Defaults to: 5.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Notes:
If max_page_count and max_row_count are 0, fetch pages until a page
returns no answers or the expected row count is hit.
Returns:
:obj:`tantrum.api_models.ApiModel`: ResultDataList API Object
"""
self._check_id()
start = datetime.datetime.utcnow()
row_start = 0
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["row_start"] = row_start
cmd_args["row_count"] = page_size
cmd_args["cache_expiration"] = cache_expiration
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
datas = result()
self._last_datas = datas
data = datas[0]
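        # reuse the server side result cache for subsequent pages and advance the row window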
cmd_args["cache_id"] = data.cache_id
cmd_args["row_start"] += page_size
m = [
"Received initial answers: {d.rows}",
"expected row_count: {d.row_count}",
"estimated total clients: {d.estimated_total}",
]
m = ", ".join(m)
m = m.format(d=data)
self.log.info(m)
all_rows = data.rows
page_count = 1
page_rows = all_rows
while True:
if len(all_rows or []) >= data.row_count:
m = "Received expected row_count {c}, considering all answers received"
m = m.format(c=data.row_count)
self.log.info(m)
break
if not page_rows:
m = "Received a page with no answers, considering all answers received"
self.log.info(m)
break
if max_page_count and page_count >= max_page_count:
m = "Reached max page count {c}, considering all answers in"
m = m.format(c=max_page_count)
self.log.info(m)
break
            if max_row_count and len(all_rows or []) >= max_row_count:
                m = "Hit max row count of {max_row_count}, considering all answers received"
                m = m.format(max_row_count=max_row_count)
                self.log.info(m)
                break
page_count += 1
page_result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = page_result
            # this should catch errors where the API returns result data as None sometimes;
            # data would need to be refetched with N retries if that happens
page_datas = page_result()
self._last_datas = page_datas
page_data = page_datas[0]
page_rows = page_data.rows
m = "Received page #{c} answers: {rows}"
m = m.format(c=page_count, rows=len(page_rows or []))
self.log.info(m)
all_rows += page_rows
cmd_args["row_start"] += page_size
time.sleep(sleep)
end = datetime.datetime.utcnow()
elapsed = end - start
m = "Finished getting {rows} answers in {dt}"
m = m.format(rows=len(all_rows or []), dt=elapsed)
self.log.info(m)
return datas
def answers_sse_start_xml(self, hashes=False, **kwargs):
"""Start up a server side export for XML format and get an export_id.
Args:
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values
Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Returns:
:obj:`str`:
"""
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["export_flag"] = True
cmd_args["export_format"] = 1
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
m = ["Received Server Side Export start response for XML format", "code={c}"]
m = ", ".join(m)
m = m.format(c=result.status_code)
self.log.debug(m)
export_id = result.object_obj["export_id"]
m = ["Started Server Side for XML format", "export_id={e!r}"]
m = ", ".join(m)
m = m.format(e=export_id)
self.log.info(m)
return export_id
def answers_sse_start_csv(
self, flatten=False, headers=True, hashes=False, **kwargs
):
"""Start up a server side export for CSV format and get an export_id.
Args:
flatten (:obj:`bool`, optional):
Flatten CSV rows if possible (single line in each cell)
Defaults to: False.
headers (:obj:`bool`, optional):
Include column headers.
Defaults to: True.
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values
Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Returns:
:obj:`str`:
"""
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["export_flag"] = True
cmd_args["export_format"] = 3 if flatten else 0
cmd_args["export_hide_csv_header_flag"] = False if headers else True
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
m = ["Received Server Side Export start response for CSV format", "code={c}"]
m = ", ".join(m)
m = m.format(c=result.status_code)
self.log.debug(m)
export_id = result.object_obj["export_id"]
m = ["Started Server Side for CSV format", "export_id={e!r}"]
m = ", ".join(m)
m = m.format(e=export_id)
self.log.info(m)
return export_id
def answers_sse_start_cef(self, leading="", trailing="", **kwargs):
"""Start up a server side export for CEF format and get an export_id.
Args:
leading (:obj:`str`, optional):
Prepend this text to each line.
Defaults to: "".
trailing (:obj:`str`, optional):
Append this text to each line.
Defaults to: "".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Returns:
:obj:`str`:
"""
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["export_flag"] = True
cmd_args["export_format"] = 2
if leading:
cmd_args["export_leading_text"] = leading
if trailing:
cmd_args["export_trailing_text"] = trailing
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
m = ["Received Server Side Export start response for CEF format", "code={c}"]
m = ", ".join(m)
m = m.format(c=result.status_code)
self.log.debug(m)
export_id = result.object_obj["export_id"]
m = ["Started Server Side for CEF format", "export_id={e!r}"]
m = ", ".join(m)
m = m.format(e=export_id)
self.log.info(m)
return export_id
def answers_sse_get_status(self, export_id, **kwargs):
"""Get the status for this questions server side export.
Args:
export_id (:obj:`str`):
                An export id returned from :meth:`answers_sse_start_xml` or
                :meth:`answers_sse_start_csv` or :meth:`answers_sse_start_cef`.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapters.ApiClient`.
Returns:
:obj:`dict`:
"""
client_args = {}
client_args.update(kwargs)
client_args["method"] = "get"
client_args["path"] = "export/{export_id}.status".format(export_id=export_id)
client_args["data"] = ""
r = self.adapter.api_client(**client_args)
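        # the status text is dot separated; map its pieces onto status and progress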
status_split = [x.strip().lower() for x in r.text.split(".") if x.strip()]
status = dict(zip(["status", "progress"], status_split))
status["export_id"] = export_id
m = [
"Received SSE status response: path={r.request.url!r}",
"code={r.status_code}",
"status={status}",
]
m = ", ".join(m)
m = m.format(r=r, status=status)
self.log.debug(m)
return status
def answers_sse_poll(self, export_id, poll_sleep=5, max_poll_count=0, **kwargs):
"""Poll a server side export for completion.
Args:
export_id (:obj:`str`):
An export id returned from :meth:`answers_sse_start_xml` or
:meth:`answers_sse_start_csv` or :meth:`answers_sse_start_cef`.
poll_sleep (:obj:`int`, optional):
Check for answers every N seconds.
Defaults to: 5.
max_poll_count (:obj:`int`, optional):
If not 0, only poll N times.
Defaults to: 0.
**kwargs:
rest of kwargs:
Passed to :meth:`answers_sse_get_status`.
Returns:
:obj:`str`:
"""
self._check_id()
start = datetime.datetime.utcnow()
poll_count = 0
sse_args = {}
sse_args.update(kwargs)
sse_args["export_id"] = export_id
status = self.answers_sse_get_status(**sse_args)
while True:
poll_count += 1
if max_poll_count and poll_count >= max_poll_count:
m = [
"Server Side Export completed",
"reached max poll count {c}",
"status {status}",
]
m = ", ".join(m)
m = m.format(c=max_poll_count, status=status)
self.log.info(m)
break
if status["status"] == "completed":
m = "Server Side Export completed: {status}"
m = m.format(status=status)
self.log.info(m)
break
if status["status"] == "failed":
m = "Server Side Export failed: {status}"
m = m.format(status=status)
raise exceptions.ModuleError(m)
time.sleep(poll_sleep)
status = self.answers_sse_get_status(**sse_args)
end = datetime.datetime.utcnow()
elapsed = end - start
m = "Finished polling for Server Side Export in {dt}, {status}"
m = m.format(dt=elapsed, status=status)
self.log.info(m)
return status
def answers_sse_get_data(
self, export_id, return_dict=False, return_obj=True, **kwargs
):
"""Get the answers for this question in XML format using server side export.
Args:
export_id (:obj:`str`):
                An export id returned from :meth:`answers_sse_start_xml` or
                :meth:`answers_sse_start_csv` or :meth:`answers_sse_start_cef`.
return_dict (:obj:`bool`, optional):
If export_id is an XML format, return a dictionary object.
Defaults to: False.
return_obj (:obj:`bool`, optional):
If export_id is XML format, return a ResultSet object.
Defaults to: True.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapters.ApiClient`.
Notes:
If export_id is not XML format or return_dict and return_obj False,
return the raw text as is.
Returns:
:obj:`tantrum.api_models.ApiModel` or :obj:`dict` or :obj:`str`:
If return_obj = True returns ResultSetList ApiModel object.
If return_dict = True returns dict.
Otherwise, return str.
"""
self._check_id()
client_args = {}
client_args.update(kwargs)
client_args["method"] = "get"
client_args["path"] = "export/{export_id}.gz".format(export_id=export_id)
client_args["data"] = ""
r = self.adapter.api_client(**client_args)
m = ["Received SSE data response", "code: {r.status_code}", "export_id: {e!r}"]
m = ", ".join(m)
m = m.format(r=r, e=export_id)
self.log.info(m)
data = r.text
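        # for XML exports, wrap the payload in a result_set element and parse it back
        # into ResultSet/ResultSetList API objects below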
if "xml" in export_id and (return_dict or return_obj):
result = results.Soap(
api_objects=self.api_objects,
response_body=r.text,
request_body=r.request.body,
method=r.request.method,
url=r.request.url,
status_code=r.status_code,
origin=r,
lvl=self.log.level,
)
data = "<{r}>{data}</{r}>".format(data=data, r="result_set")
src = "SSE get data response"
data = result.str_to_obj(text=data, src=src, try_int=False)
if return_dict:
return data
data = self.api_objects.ResultSet(**data["result_set"])
data = self.api_objects.ResultSetList(*[data])
return data
return data
class ParsedQuestion(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
bits = [
"parse matches: {c}".format(c=len(self.obj)),
"has exact match: {em}".format(em=True if self.get_canonical else False),
]
bits = "({})".format(", ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@property
def get_canonical(self):
"""Return any parse result that is an exact match."""
for x in self.obj:
if x.question.from_canonical_text:
return x
return None
def map_select_params(self, pq):
"""Map parameters to sensors on the left hand side of the question."""
param_cls = self.api_objects.Parameter
param_values = pq.parameter_values
selects = pq.question.selects or []
for select in selects:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
sensor = select.sensor
if not sensor.parameter_definition:
m = "No parameters defined on sensor {s}, going to next"
m = m.format(s=sensor)
self.log.debug(m)
continue
sensor.source_id = sensor.id
sensor.id = None
sensor.parameters = self.api_objects.ParameterList()
params = json.loads(sensor.parameter_definition)["parameters"]
for param in params:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
key = "||{}||".format(param["key"])
value = param_values.pop(0)
sensor.parameters.append(param_cls(key=key, value=value))
m = "Mapped parameter {k!r}='{v}' for {s}"
m = m.format(k=key, v=value, s=sensor)
self.log.debug(m)
def map_group_params(self, pq, group):
"""Map parameters to filters on the right hand side of the question."""
param_cls = self.api_objects.Parameter
group_sensors = pq.question_group_sensors
param_values = pq.parameter_values
if not group:
m = "Empty group, not mapping group params"
self.log.debug(m)
return
if not group_sensors:
m = "No question group sensors defined, not mapping group params"
self.log.debug(m)
return
for group_filter in group.filters or []:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
m = "Now mapping parameters for group filter: {gf}"
m = m.format(gf=group_filter)
self.log.debug(m)
sensor_id = group_filter.sensor.id
sensor = [x for x in group_sensors if x.id == sensor_id][0]
if not sensor.parameter_definition:
m = "No parameters defined on sensor {s}, going to next"
m = m.format(s=sensor)
self.log.debug(m)
continue
sensor.source_id = sensor.id
sensor.id = None
sensor.parameters = self.api_objects.ParameterList()
params = json.loads(sensor.parameter_definition)["parameters"]
for param in params:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
key = "||{}||".format(param["key"])
value = param_values.pop(0)
sensor.parameters.append(param_cls(key=key, value=value))
m = "Mapped parameter {k!r}='{v}' for {s}"
m = m.format(k=key, v=value, s=sensor)
self.log.debug(m)
group_filter.sensor = sensor
for sub_group in group.sub_groups or []:
self.map_group_params(pq, sub_group)
@property
def result_indexes(self):
"""Get the parse result indices in str form."""
pq_tmpl = " index: {idx}, result: {text!r}, params: {params}, exact: {exact}"
pq_tmpl = pq_tmpl.format
pq_list = []
for idx, pq in enumerate(self.obj):
pq_txt = pq_tmpl(
idx=idx,
text=pq.question_text,
params=list(pq.parameter_values or []),
exact=bool(pq.question.from_canonical_text),
)
pq_list.append(pq_txt)
return "\n".join(pq_list)
def pick(self, index=None, use_exact_match=True, use_first=False, **kwargs):
"""Pick a parse result and ask it.
Args:
index (:obj:`int`, optional):
Index of parse result to ask.
Defaults to: None.
use_exact_match (:obj:`bool`, optional):
If index is None and one of the parse results is an exact match,
pick and ask it.
Defaults to: True.
            use_first (:obj:`bool`, optional):
                If index is None and there is no exact match,
                pick the first parse result and ask it.
                Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_add_parsed_question`.
Returns:
:obj:`Question`
"""
if index:
pq = self.obj[index]
m = "Picking parsed question based on index {index}: {pq.question}"
m = m.format(index=index, pq=pq)
self.log.info(m)
elif use_exact_match and self.get_canonical:
pq = self.get_canonical
m = "Picking parsed question based on exact match: {pq.question}"
m = m.format(pq=pq)
self.log.info(m)
elif use_first:
pq = self.obj[0]
m = "Picking first matching parsed question: {pq.question}"
m = m.format(pq=pq)
self.log.info(m)
else:
err = [
"No index supplied",
"no exact matching parsed result",
"and use_first is False!",
]
err = ", ".join(err)
err = [err, "Supply an index of a parsed result:", self.result_indexes]
err = "\n".join(err)
raise exceptions.ModuleError(err)
self.map_select_params(pq=pq)
m = "Finished mapping parameters for selects, parameter values left: {pv!r}"
m = m.format(pv=pq.parameter_values)
self.log.debug(m)
self.map_group_params(pq=pq, group=pq.question.group)
m = "Finished mapping parameters for groups, parameter values left: {pv!r}"
m = m.format(pv=pq.parameter_values)
self.log.debug(m)
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = pq
result = self.adapter.cmd_add_parsed_question(**cmd_args)
result_obj = result()
workflow = Question(
adapter=self.adapter, obj=result_obj, lvl=self.log.level, result=result
)
m = "Added parsed question: {w}"
m = m.format(w=workflow)
self.log.info(m)
workflow.refetch()
return workflow
@classmethod
def parse(cls, adapter, text, lvl="info", **kwargs):
"""Get parse results of text from API.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
text (:obj:`str`):
Question text to parse.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_parse_question`.
Returns:
:obj:`ParsedQuestion`
"""
log = utils.logs.get_obj_log(obj=cls, lvl=lvl)
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["text"] = text
result = adapter.cmd_parse_question(**cmd_args)
result_obj = result()
if result_obj is None:
m = "No parse results returned for text: {t!r}"
m = m.format(t=text)
raise exceptions.ModuleError(m)
any_canonical = any([x.question.from_canonical_text for x in result_obj])
m = "Received {n} parse results (any exact match: {ac})"
m = m.format(n=len(result_obj), ac=any_canonical)
log.info(m)
return cls(adapter=adapter, obj=result_obj, lvl=lvl, result=result)
OPERATOR_MAP = {
"less": {"op": "Less", "tmpl": "{value}"},
"lessequal": {"op": "LessEqual", "tmpl": "{value}"},
"greater": {"op": "Greater", "tmpl": "{value}"},
"greaterequal": {"op": "GreaterEqual", "tmpl": "{value}"},
"equal": {"op": "Equal", "tmpl": "{value}"},
"regex": {"op": "RegexMatch", "tmpl": "{value}"},
"startswith": {"op": "RegexMatch", "tmpl": ".*{value}"},
"endswith": {"op": "RegexMatch", "tmpl": "{value}.*"},
"contains": {"op": "RegexMatch", "tmpl": ".*{value}.*"},
"hash": {"op": "HashMatch", "tmpl": "{value}"},
}
TYPE_MAP = {
"Hash": 0,
# SENSOR_RESULT_TYPE_STRING
"String": 1,
# SENSOR_RESULT_TYPE_VERSION
"Version": 2,
# SENSOR_RESULT_TYPE_NUMERIC
"NumericDecimal": 3,
# SENSOR_RESULT_TYPE_DATE_BES
"BESDate": 4,
# SENSOR_RESULT_TYPE_IPADDRESS
"IPAddress": 5,
# SENSOR_RESULT_TYPE_DATE_WMI
"WMIDate": 6,
# e.g. "2 years, 3 months, 18 days, 4 hours, 22 minutes:
# 'TimeDiff', and 3.67 seconds" or "4.2 hours"
# (numeric + "Y|MO|W|D|H|M|S" units)
"TimeDiff": 7,
# e.g. 125MB or 23K or 34.2Gig (numeric + B|K|M|G|T units)
"DataSize": 8,
"NumericInteger": 9,
"VariousDate": 10,
"RegexMatch": 11,
"LastOperatorType": 12,
}
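# render a float as a whole-number percentage string, e.g. PCT_FMT(99.2) -> "99%"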
PCT_FMT = "{0:.0f}%".format
def get_operator_map(operator):
"""Validate operator against :data:`OPERATOR_MAP`."""
if operator in OPERATOR_MAP:
return OPERATOR_MAP[operator]
m = "Operator {o!r} is invalid, must be one of {vo}"
m = m.format(o=operator, vo=list(OPERATOR_MAP.keys()))
raise exceptions.ModuleError(m)
def get_type_map(type):
"""Validate type against :data:`TYPE_MAP`."""
if type in TYPE_MAP:
return TYPE_MAP[type]
m = "Type {o!r} is invalid, must be one of {vo}"
m = m.format(o=type, vo=list(TYPE_MAP.keys()))
raise exceptions.ModuleError(m)
|
[
"json.loads",
"time.sleep",
"datetime.datetime.utcnow",
"datetime.timedelta",
"collections.OrderedDict"
] |
[((3276, 3302), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3300, 3302), False, 'import datetime\n'), ((11258, 11307), 'json.loads', 'json.loads', (["(self.obj.parameter_definition or '{}')"], {}), "(self.obj.parameter_definition or '{}')\n", (11268, 11307), False, 'import json\n'), ((11852, 11894), 'collections.OrderedDict', 'OrderedDict', (["((p['key'], p) for p in params)"], {}), "((p['key'], p) for p in params)\n", (11863, 11894), False, 'from collections import OrderedDict\n'), ((12066, 12079), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12077, 12079), False, 'from collections import OrderedDict\n'), ((20147, 20173), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (20171, 20173), False, 'import datetime\n'), ((20191, 20211), 'datetime.timedelta', 'datetime.timedelta', ([], {}), '()\n', (20209, 20211), False, 'import datetime\n'), ((25057, 25083), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (25081, 25083), False, 'import datetime\n'), ((27597, 27623), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (27621, 27623), False, 'import datetime\n'), ((28764, 28790), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (28788, 28790), False, 'import datetime\n'), ((29043, 29069), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (29067, 29069), False, 'import datetime\n'), ((30943, 30969), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (30967, 30969), False, 'import datetime\n'), ((33559, 33585), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (33583, 33585), False, 'import datetime\n'), ((40033, 40059), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (40057, 40059), False, 'import datetime\n'), ((41201, 41227), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (41225, 41227), False, 'import datetime\n'), ((27559, 27581), 'time.sleep', 'time.sleep', (['poll_sleep'], {}), '(poll_sleep)\n', (27569, 27581), False, 'import time\n'), ((33526, 33543), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (33536, 33543), False, 'import time\n'), ((41102, 41124), 'time.sleep', 'time.sleep', (['poll_sleep'], {}), '(poll_sleep)\n', (41112, 41124), False, 'import time\n'), ((8350, 8367), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (8360, 8367), False, 'import time\n'), ((25137, 25174), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'poll_secs'}), '(seconds=poll_secs)\n', (25155, 25174), False, 'import datetime\n'), ((26169, 26195), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (26193, 26195), False, 'import datetime\n'), ((45487, 45526), 'json.loads', 'json.loads', (['sensor.parameter_definition'], {}), '(sensor.parameter_definition)\n', (45497, 45526), False, 'import json\n'), ((47443, 47482), 'json.loads', 'json.loads', (['sensor.parameter_definition'], {}), '(sensor.parameter_definition)\n', (47453, 47482), False, 'import json\n')]
|
import subprocess
import base64
import json
import re
import hashlib
import tempfile
import os
from lxml import etree
import pprint
from math_tools import percentile
def get_blast_databases(exe_loc, db_loc):
"""
Look for BLAST databases using in given path and return a list
Args:
exe_loc: Location (directory) of the BLAST executables.
db_loc: Directory containing the BLAST DB.
Returns:
A dict containing lists of databases available.
# Test it!
>>> get_blast_databases('/Users/work/Projects/pyBlast/bin/', '/Users/work/Projects/pyBlast/db/')
{'protein': [{'location': '/Users/work/Projects/pyBlast/db/yeast.aa', 'title': 'yeast.aa'}], 'nucleotide': [{'location': '/Users/work/Projects/pyBlast/db/yeast.nt', 'title': 'yeast.nt'}]}
"""
    try:
        found = subprocess.check_output([exe_loc+'blastdbcmd', '-list', db_loc, '-list_outfmt', "'%f %p %t'"])
except:
found = ''
found = [entry.split(' ',2) for entry in re.split(r'\n', re.sub(r'\'', '', found)) if len(entry) > 1]
databases = {}
for f in found:
if f[1].lower() not in databases:
databases[f[1].lower()] = []
databases[f[1].lower()].append({'location': f[0], 'title': f[2]})
return databases
def get_blast_database_from_title(exe_loc, db_loc, title):
"""
For a give title get the actual name of the database (it may differ from title)
Args:
exe_loc: Location (directory) of the BLAST executables.
db_loc: Directory containing the BLAST DB.
title: The title of the BLAST database to search for.
Returns:
The location of the BLAST database.
"""
database_list = get_blast_databases(exe_loc, db_loc)
flat = []
for k,l in database_list.iteritems():
flat.extend(l)
for d in flat:
if title == d['title']:
return d['location']
return False
def get_sequence_from_database(exe_loc, db, seq_id):
"""
Extract a sequence from the given BLAST database and return it
Args:
exe_loc: Directory containing BLAST executables.
db: The database to get sequence from.
seq_id: The sequence ID of the sequence to get.
Returns:
The sequence if found else an empty string
# Test:
>>> get_sequence_from_database('/Users/work/Projects/pyBlast/bin/', '/Users/work/Projects/pyBlast/db/yeast.nt', 'gi|6226515|ref|NC_001224.1|')
"""
try:
found = subprocess.check_output([exe_loc+'blastdbcmd', '-db', db, '-entry', seq_id])
except:
found = ''
return found
def parse_extra_options(option_string, exclude=[]):
"""
Create an list of options filtering out excluded options
Args:
option_string: A string containing extra blast options.
exclude: Options to exclude from the generated list.
Returns:
A list of options except those in exclude
"""
options = re.findall(r'((-\w+) ([\w\d\.]+)?)\s?', option_string)
extras = []
for o in options:
if o[1] not in exclude:
extras.extend(o[1:])
return extras
def run_blast(database, program, filestore, file_uuid, sequence, options):
"""
Perform a BLAST search on the given database using the given query
Args:
database: The database to search (full path).
program: The program to use (e.g. BLASTN, TBLASTN, BLASTX).
filestore: The directory to store the XML output.
file_uuid: A unique identifier for the filename.
sequence: The sequence to BLAST.
options: Any extra options to pass to the BLAST executable.
Returns:
A tuple containing the stdout and stderr of the program.
# Test:
>>> seq = ">test\\nTTCATAATTAATTTTTTATATATATATTATATTATAATATTAATTTATATTATAAAAATAATATTTATTATTAAAATATT\\nTATTCTCCTTTCGGGGTTCCGGCTCCCGTGGCCGGGCCCCGGAATTATTAATTAATAATAAATTATTATTAATAATTATT\\n>test 2\\nAATGGTATTAGATTCAGTGAATTTGGTACAAGACGTCGTAGATCTCTGAAGGCTCAAGATCTAATTATGCAAGGAATCATGAAAGCTGTGAACGGTAACCCAGACAGAAACAAATCGCTATTATTAGGCACATCAAATATTTTATTTGCCAAGAAATATGGAGTCAAGCCAATCGGTACTGTGGCTCACGAGTGGGTTATGGGAGTCGCTTCTATTAGTGAAGATTATTTGCATGCCAATAAAAATGCAATGGATTGTTGGATCAATACTTTTGGTGCAAAAAATGCTGGTTTAGCATTAACGGATACTTTTGGAACTGATGACTTTTTAAAATCATTCCGTCCACCATATTCTGATGCTTACGTCGGTGTTAGACAAGATTCTGGAGACCCAGTTGAGTATACCAAAAAGATTTCCCACCATTACCATGACGTGTTGAAATTGCCTAAATTCTCGAAGATTATCTGTTATTCCGATTCTTTGAACGTCGAAAAGGCAATAACTTACTCCCATGCAGCTAAAGAGAATG"
>>> blast('/Users/work/Projects/pyBlast/db/yeast.nt', '/Users/work/Projects/pyBlast/bin/blastn', '/Users/work/Projects/pyBlast/store/', seq, {u'-evalue': 10.0, u'-strand': u'both'})
>>> seq = ">test\\nTTC"
>>> blast('/Users/work/Projects/pyBlast/db/yeast.nt', '/Users/work/Projects/pyBlast/bin/blastn', '/Users/work/Projects/pyBlast/store/', seq, {u'-evalue': 10.0, u'-strand': u'both'})
"""
query = [program, '-db', database, '-outfmt', '5', '-query', '-', '-out', "{0}{1}.xml".format(filestore, file_uuid), '-max_target_seqs', '50']
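    # options already handled explicitly above (or not supported here) are filtered out of the extras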
exclude = [
'-db',
'-query',
'-out',
'-subject',
'-html',
'-gilist',
'-negative_gilist',
'-entrez_query',
'-remote',
'-outfmt',
'-num_threads',
'-import_search_strategy',
'-export_search_strategy',
'-window_masker_db',
'-index_name',
'-use_index',
]
extra = parse_extra_options(options, exclude)
query.extend(extra)
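    # stream the query sequence to BLAST via stdin; results are written to the XML file given by -out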
p = subprocess.Popen(query, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
stdout, stderr = p.communicate(sequence)
return (stdout, stderr)
def poll(name):
"""
Check if the file <name> has been created, indicating BLAST has finished, and return results
Args:
name: The filename of the file that was created in a BLAST search.
Returns:
The file or False if it has not yet been created.
"""
try:
with open(name) as results:
if os.path.getsize(name) > 0:
return results.read()
raise IOError
except IOError:
return False
def chunk_string(s, l=10):
"""
Split a string into chunks of a set length.
Args:
s: The string to chunk.
l: The length of the chunks.
Returns:
A list containing the string chunks.
"""
return [s[i:i+l] for i in range(0,len(s),l)]
def format_bases(bases):
"""
Generate HTML that colours the bases in a string.
Args:
bases: A string containing a genetic sequence.
Returns:
An HTML string.
"""
formatted = ''
for b in bases:
formatted += '<span class="base-{}">{}</span>'.format(b,b)
return formatted
def create_formatted_sequences(hsp):
"""
Take a sequence and format it for display.
Args:
hsp: A dict containing the sequence information.
Returns:
An HTML string of the formatted sequence.
"""
cl = 60
query = chunk_string(hsp['query_seq'], cl)
match = chunk_string(hsp['midline'], cl)
subject = chunk_string(hsp['hit_seq'], cl)
output = ""
for ln, line in enumerate(query):
query_from = int(hsp['query_from']) if ln == 0 else int(hsp['query_from'])+(ln*cl)
query_to = query_from+(cl-1)
subject_from = int(hsp['hit_from']) if ln == 0 else int(hsp['hit_from'])+(ln*cl)
subject_to = subject_from+(cl-1)
qseq = format_bases(line)
sseq = format_bases(subject[ln])
output += '''
<div class="row">
<pre class="col-xs-1 seq-col-sm">Query
Subject
</pre>
<pre class="col-xs-1 seq-col-sm">{qsnum}
{ssnum}
</pre>
<pre class="col-xs-7 seq-col-lg">{qseq}
{match}
{sseq}
</pre>
<pre class="col-xs-1 seq-col-sm">{qenum}
{senum}
</pre>
</div>
'''.format(qseq=qseq,
match=match[ln],
sseq=sseq,
qsnum=str(query_from),
qenum=query_to,
ssnum=str(subject_from),
senum=subject_to
)
return output.rstrip()
def process_blast_result(filecontents, cutoff=0.0001):
"""
Take a BLAST XML results file and process into a usable dict.
Args:
filecontents: The contents of a BLAST XML file.
cutoff: The cutoff for which a sequence is considered relevant.
Returns:
A dict of the results.
"""
results = {'results':[], 'messages':[]}
messages = []
b = etree.fromstring(filecontents)
# Get BLAST details
db_loc = b.xpath('string(BlastOutput_db/text())').split('/')
results['details'] = {
'program': b.xpath('string(BlastOutput_program/text())'),
'version': b.xpath('string(BlastOutput_version/text())'),
'reference': b.xpath('string(BlastOutput_reference/text())'),
'db': db_loc[-1],
'query_id': b.xpath('string(BlastOutput_query-ID/text())'),
'query_def': b.xpath('string(BlastOutput_query-def/text())'),
'query_length': b.xpath('string(BlastOutput_query-len/text())'),
'params': {},
}
for t in b.findall('BlastOutput_param/Parameters/*'):
name = t.tag.split('_', 1)
results['details']['params'][name[-1]] = t.text
for it in b.findall('BlastOutput_iterations/Iteration'):
        # The file may contain a message; store that for later use
if it.find('.//Iteration_message') is not None:
results['messages'].append(it.find('.//Iteration_message').text)
else:
r = {
'details': {
'id': it.xpath('string(Iteration_query-ID/text())'),
'def': it.xpath('string(Iteration_query-def/text())'),
'length': it.xpath('string(Iteration_query-len/text())'),
},
'statistics': {
'db_num': b.xpath('string(Iteration_stat/Statistics/Statistics_db-num/text())'),
'db_length': b.xpath('string(Iteration_stat/Statistics/Statistics_db-len/text())'),
'hsp_length': b.xpath('string(Iteration_stat/Statistics/Statistics_hsp-len/text())'),
'eff_space': b.xpath('string(Iteration_stat/Statistics/Statistics_eff-space/text())'),
'kappa': b.xpath('string(Iteration_stat/Statistics/Statistics_kappa/text())'),
'lambda': b.xpath('string(Iteration_stat/Statistics/Statistics_lambda/text())'),
'entropy': b.xpath('string(Iteration_stat/Statistics/Statistics_entropy/text())'),
},
'hits': []
}
for ht in it.findall('Iteration_hits/Hit'):
h = {
'num': ht.xpath('string(Hit_num/text())'),
'id': ht.xpath('string(Hit_id/text())'),
'def': ht.xpath('string(Hit_def/text())'),
'accession': ht.xpath('string(Hit_accession/text())'),
'length': ht.xpath('string(Hit_len/text())'),
'hsps': [],
}
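                # track aligned query coordinates across HSPs to work out the overall hit span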
query_from = []
query_to = []
for hs in ht.findall('.//Hsp'):
hsp = {
'num': hs.xpath('string(Hsp_num/text())'),
'bit_score': hs.xpath('string(Hsp_bit-score/text())'),
'score': hs.xpath('string(Hsp_score/text())'),
'evalue': hs.xpath('string(Hsp_evalue/text())'),
'query_from': hs.xpath('string(Hsp_query-from/text())'),
'query_to': hs.xpath('string(Hsp_query-to/text())'),
'hit_from': hs.xpath('string(Hsp_hit-from/text())'),
'hit_to': hs.xpath('string(Hsp_hit-to/text())'),
'query_frame': hs.xpath('string(Hsp_query-frame/text())'),
'hit_frame': hs.xpath('string(Hsp_hit-frame/text())'),
'identity': hs.xpath('string(Hsp_identity/text())'),
'positive': hs.xpath('string(Hsp_positive/text())'),
'gaps': hs.xpath('string(Hsp_gaps/text())'),
'align_length': hs.xpath('string(Hsp_align-len/text())'),
'query_seq': hs.xpath('string(Hsp_qseq/text())'),
'hit_seq': hs.xpath('string(Hsp_hseq/text())'),
'midline': hs.xpath('string(Hsp_midline/text())'),
}
hsp['identity_percent'] = int(hsp['identity'])/float(hsp['align_length'])*100
hsp['gaps_percent'] = int(hsp['gaps'])/float(hsp['align_length'])*100
if float(hsp['evalue']) < cutoff: #float(hsp['bit_score']) > bit_score_filter:
query_from.append(int(hsp['query_from']))
query_to.append(int(hsp['query_to']))
hsp['formatted'] = create_formatted_sequences(hsp)
hsp['query_chunk'] = chunk_string(hsp['query_seq'], 60)
hsp['match_chunk'] = chunk_string(hsp['midline'], 60)
hsp['subject_chunk'] = chunk_string(hsp['hit_seq'], 60)
h['hsps'].append(hsp)
if len(h['hsps']) > 0:
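                    # coordinates can run backwards (e.g. for reverse-strand hits), so orient the span accordingly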
if sum(query_from) > sum(query_to):
h['query_from'] = max(query_from)
h['query_to'] = min(query_to)
else:
h['query_from'] = min(query_from)
h['query_to'] = max(query_to)
r['hits'].append(h)
results['results'].append(r)
return results
|
[
"subprocess.Popen",
"lxml.etree.fromstring",
"os.path.getsize",
"subprocess.check_output",
"re.findall",
"re.sub"
] |
[((811, 911), 'subprocess.check_output', 'subprocess.check_output', (['[exe_loc + \'blastdbcmd\', \'-list\', db_loc, \'-list_outfmt\', "\'%f %p %t\'"]'], {}), '([exe_loc + \'blastdbcmd\', \'-list\', db_loc,\n \'-list_outfmt\', "\'%f %p %t\'"])\n', (834, 911), False, 'import subprocess\n'), ((3052, 3110), 're.findall', 're.findall', (['"""((-\\\\w+) ([\\\\w\\\\d\\\\.]+)?)\\\\s?"""', 'option_string'], {}), "('((-\\\\w+) ([\\\\w\\\\d\\\\.]+)?)\\\\s?', option_string)\n", (3062, 3110), False, 'import re\n'), ((5594, 5704), 'subprocess.Popen', 'subprocess.Popen', (['query'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'bufsize': '(-1)'}), '(query, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, bufsize=-1)\n', (5610, 5704), False, 'import subprocess\n'), ((8501, 8531), 'lxml.etree.fromstring', 'etree.fromstring', (['filecontents'], {}), '(filecontents)\n', (8517, 8531), False, 'from lxml import etree\n'), ((931, 1031), 'subprocess.check_output', 'subprocess.check_output', (['[exe_loc + \'blastdbcmd\', \'-list\', db_loc, \'-list_outfmt\', "\'%f %p %t\'"]'], {}), '([exe_loc + \'blastdbcmd\', \'-list\', db_loc,\n \'-list_outfmt\', "\'%f %p %t\'"])\n', (954, 1031), False, 'import subprocess\n'), ((2583, 2661), 'subprocess.check_output', 'subprocess.check_output', (["[exe_loc + 'blastdbcmd', '-db', db, '-entry', seq_id]"], {}), "([exe_loc + 'blastdbcmd', '-db', db, '-entry', seq_id])\n", (2606, 2661), False, 'import subprocess\n'), ((1118, 1142), 're.sub', 're.sub', (['"""\\\\\'"""', '""""""', 'found'], {}), '("\\\\\'", \'\', found)\n', (1124, 1142), False, 'import re\n'), ((6122, 6143), 'os.path.getsize', 'os.path.getsize', (['name'], {}), '(name)\n', (6137, 6143), False, 'import os\n')]
|
from distutils.core import setup
import py2exe
setup(console=['Single Stock Scraper.py'])
|
[
"distutils.core.setup"
] |
[((51, 93), 'distutils.core.setup', 'setup', ([], {'console': "['Single Stock Scraper.py']"}), "(console=['Single Stock Scraper.py'])\n", (56, 93), False, 'from distutils.core import setup\n')]
|
'''
Created on 02-Jul-2016
@author: <NAME>
@version: 1.0
@since: 1.0
'''
from flask_sqlalchemy import SQLAlchemy
from restful.tiny_routes import app
db = SQLAlchemy(app)
class UrlMap(db.Model):
'''
A model responsible for storing shortened to long url mapping.
'''
id = db.Column('id', db.Integer, primary_key = True)
uuid = db.Column('uuid', db.Integer, unique = True)
short_url = db.Column('short_url', db.String(255), unique = True)
url = db.Column('url', db.String(255), unique = True)
def __init__(self, uuid, short_url, url):
'''
Constructor
'''
self.uuid = uuid
self.short_url = short_url
self.url = url
|
[
"flask_sqlalchemy.SQLAlchemy"
] |
[((156, 171), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (166, 171), False, 'from flask_sqlalchemy import SQLAlchemy\n')]
|
from twitter_ads.campaign import Tweet
from twitter_ads.client import Client
from twitter_ads.creative import MediaLibrary, PollCard
from twitter_ads.enum import MEDIA_TYPE
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
ACCOUNT_ID = ''
# initialize the client
client = Client(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# load the advertiser account instance
account = client.accounts(ACCOUNT_ID)
# most recent Media Library video
ml = MediaLibrary(account).all(account, media_type=MEDIA_TYPE.VIDEO)
media_key = ml.first.media_key
# create Poll Card with video
pc = PollCard(account)
pc.duration_in_minutes = 10080 # one week
pc.first_choice = 'Northern'
pc.second_choice = 'Southern'
pc.name = ml.first.name + ' poll card from SDK'
pc.media_key = media_key
pc.save()
# create Tweet
Tweet.create(account, text='Which hemisphere do you prefer?', card_uri=pc.card_uri)
# https://twitter.com/apimctestface/status/973002610033610753
|
[
"twitter_ads.creative.MediaLibrary",
"twitter_ads.client.Client",
"twitter_ads.campaign.Tweet.create",
"twitter_ads.creative.PollCard"
] |
[((307, 379), 'twitter_ads.client.Client', 'Client', (['CONSUMER_KEY', 'CONSUMER_SECRET', 'ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET'], {}), '(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n', (313, 379), False, 'from twitter_ads.client import Client\n'), ((629, 646), 'twitter_ads.creative.PollCard', 'PollCard', (['account'], {}), '(account)\n', (637, 646), False, 'from twitter_ads.creative import MediaLibrary, PollCard\n'), ((847, 935), 'twitter_ads.campaign.Tweet.create', 'Tweet.create', (['account'], {'text': '"""Which hemisphere do you prefer?"""', 'card_uri': 'pc.card_uri'}), "(account, text='Which hemisphere do you prefer?', card_uri=pc.\n card_uri)\n", (859, 935), False, 'from twitter_ads.campaign import Tweet\n'), ((498, 519), 'twitter_ads.creative.MediaLibrary', 'MediaLibrary', (['account'], {}), '(account)\n', (510, 519), False, 'from twitter_ads.creative import MediaLibrary, PollCard\n')]
|
# Generated by Django 2.2.7 on 2020-01-13 02:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0003_auto_20200113_0225'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='user',
),
migrations.AddField(
model_name='photo',
name='title',
field=models.CharField(blank=True, max_length=255),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] |
[((232, 287), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""photo"""', 'name': '"""user"""'}), "(model_name='photo', name='user')\n", (254, 287), False, 'from django.db import migrations, models\n'), ((429, 473), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)'}), '(blank=True, max_length=255)\n', (445, 473), False, 'from django.db import migrations, models\n')]
|
import sys
import logging
import click
import entrypoints
LOG_LEVEL_CODES = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
}
def merge_extensions(click_group):
"""
Each extension is called with click group for
ultimate agility while preserving cli context.
"""
for extension in load_extensions():
extension(click_group)
return click_group
def load_extensions():
"""Return list of Kibitzr CLI extensions"""
return [
point.load()
for point in entrypoints.get_group_all("kibitzr.cli")
]
@click.group()
@click.option("-l", "--log-level", default="info",
type=click.Choice(LOG_LEVEL_CODES.keys()),
help="Logging level")
@click.pass_context
def cli(ctx, log_level):
"""Run kibitzr COMMAND --help for detailed descriptions"""
ctx.obj = {'log_level': LOG_LEVEL_CODES[log_level.lower()]}
@cli.command()
def version():
"""Print version"""
from kibitzr import __version__ as kibitzr_version
print(kibitzr_version)
@cli.command()
def firefox():
"""Launch Firefox with persistent profile"""
from kibitzr.app import Application
Application().run_firefox()
@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def once(ctx, name):
"""Run kibitzr checks once and exit"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))
@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def run(ctx, name):
"""Run kibitzr in the foreground mode"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=False, log_level=ctx.obj['log_level'], names=name))
@cli.command()
def init():
"""Create boilerplate configuration files"""
from kibitzr.app import Application
Application.bootstrap()
@cli.command()
def telegram_chat():
"""Return chat id for the last message sent to Telegram Bot"""
# rename import to escape name clashing:
from kibitzr.app import Application
app = Application()
app.telegram_chat()
@cli.command()
def clean():
"""Clean change history"""
from kibitzr.storage import PageHistory
PageHistory.clean()
@cli.command()
def stash():
"""Print stash contents"""
from kibitzr.stash import Stash
Stash.print_content()
extended_cli = merge_extensions(cli)
if __name__ == "__main__":
extended_cli()
|
[
"entrypoints.get_group_all",
"click.argument",
"kibitzr.storage.PageHistory.clean",
"kibitzr.app.Application",
"kibitzr.stash.Stash.print_content",
"click.group",
"kibitzr.app.Application.bootstrap"
] |
[((623, 636), 'click.group', 'click.group', ([], {}), '()\n', (634, 636), False, 'import click\n'), ((1262, 1294), 'click.argument', 'click.argument', (['"""name"""'], {'nargs': '(-1)'}), "('name', nargs=-1)\n", (1276, 1294), False, 'import click\n'), ((1538, 1570), 'click.argument', 'click.argument', (['"""name"""'], {'nargs': '(-1)'}), "('name', nargs=-1)\n", (1552, 1570), False, 'import click\n'), ((1429, 1442), 'kibitzr.app.Application', 'Application', ([], {}), '()\n', (1440, 1442), False, 'from kibitzr.app import Application\n'), ((1706, 1719), 'kibitzr.app.Application', 'Application', ([], {}), '()\n', (1717, 1719), False, 'from kibitzr.app import Application\n'), ((1920, 1943), 'kibitzr.app.Application.bootstrap', 'Application.bootstrap', ([], {}), '()\n', (1941, 1943), False, 'from kibitzr.app import Application\n'), ((2144, 2157), 'kibitzr.app.Application', 'Application', ([], {}), '()\n', (2155, 2157), False, 'from kibitzr.app import Application\n'), ((2291, 2310), 'kibitzr.storage.PageHistory.clean', 'PageHistory.clean', ([], {}), '()\n', (2308, 2310), False, 'from kibitzr.storage import PageHistory\n'), ((2412, 2433), 'kibitzr.stash.Stash.print_content', 'Stash.print_content', ([], {}), '()\n', (2431, 2433), False, 'from kibitzr.stash import Stash\n'), ((573, 613), 'entrypoints.get_group_all', 'entrypoints.get_group_all', (['"""kibitzr.cli"""'], {}), "('kibitzr.cli')\n", (598, 613), False, 'import entrypoints\n'), ((1216, 1229), 'kibitzr.app.Application', 'Application', ([], {}), '()\n', (1227, 1229), False, 'from kibitzr.app import Application\n')]
|
"""Tests for the create_protocol_runner factory."""
import pytest
from pathlib import Path
from opentrons.hardware_control import API as HardwareAPI
from opentrons.protocol_engine import ProtocolEngine, create_protocol_engine
from opentrons.file_runner import (
ProtocolFileType,
ProtocolFile,
JsonFileRunner,
PythonFileRunner,
create_file_runner,
)
@pytest.fixture
async def protocol_engine(hardware: HardwareAPI) -> ProtocolEngine:
"""Get an actual ProtocolEngine for smoke-test purposes."""
return await create_protocol_engine(hardware=hardware)
async def test_create_json_runner(
protocol_engine: ProtocolEngine,
json_protocol_file: Path,
) -> None:
"""It should be able to create a JSON file runner."""
protocol_file = ProtocolFile(
file_type=ProtocolFileType.JSON,
file_path=json_protocol_file,
)
result = create_file_runner(
protocol_file=protocol_file,
engine=protocol_engine,
)
assert isinstance(result, JsonFileRunner)
async def test_create_python_runner(
protocol_engine: ProtocolEngine,
python_protocol_file: Path,
) -> None:
"""It should be able to create a Python file runner."""
protocol_file = ProtocolFile(
file_type=ProtocolFileType.PYTHON,
file_path=python_protocol_file,
)
result = create_file_runner(
protocol_file=protocol_file,
engine=protocol_engine,
)
assert isinstance(result, PythonFileRunner)
|
[
"opentrons.file_runner.create_file_runner",
"opentrons.file_runner.ProtocolFile",
"opentrons.protocol_engine.create_protocol_engine"
] |
[((773, 848), 'opentrons.file_runner.ProtocolFile', 'ProtocolFile', ([], {'file_type': 'ProtocolFileType.JSON', 'file_path': 'json_protocol_file'}), '(file_type=ProtocolFileType.JSON, file_path=json_protocol_file)\n', (785, 848), False, 'from opentrons.file_runner import ProtocolFileType, ProtocolFile, JsonFileRunner, PythonFileRunner, create_file_runner\n'), ((886, 957), 'opentrons.file_runner.create_file_runner', 'create_file_runner', ([], {'protocol_file': 'protocol_file', 'engine': 'protocol_engine'}), '(protocol_file=protocol_file, engine=protocol_engine)\n', (904, 957), False, 'from opentrons.file_runner import ProtocolFileType, ProtocolFile, JsonFileRunner, PythonFileRunner, create_file_runner\n'), ((1227, 1306), 'opentrons.file_runner.ProtocolFile', 'ProtocolFile', ([], {'file_type': 'ProtocolFileType.PYTHON', 'file_path': 'python_protocol_file'}), '(file_type=ProtocolFileType.PYTHON, file_path=python_protocol_file)\n', (1239, 1306), False, 'from opentrons.file_runner import ProtocolFileType, ProtocolFile, JsonFileRunner, PythonFileRunner, create_file_runner\n'), ((1344, 1415), 'opentrons.file_runner.create_file_runner', 'create_file_runner', ([], {'protocol_file': 'protocol_file', 'engine': 'protocol_engine'}), '(protocol_file=protocol_file, engine=protocol_engine)\n', (1362, 1415), False, 'from opentrons.file_runner import ProtocolFileType, ProtocolFile, JsonFileRunner, PythonFileRunner, create_file_runner\n'), ((538, 579), 'opentrons.protocol_engine.create_protocol_engine', 'create_protocol_engine', ([], {'hardware': 'hardware'}), '(hardware=hardware)\n', (560, 579), False, 'from opentrons.protocol_engine import ProtocolEngine, create_protocol_engine\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2022/4/18 11:45 AM
# @File : test_api.py
# @Author:
# @Desc : Tests
import unittest
import requests
import time, os
import json
import base64
import random
import string
import pickle
import sys
class LSTMKQGATestCase(unittest.TestCase):
host_server = f'http://l8:9966'
def test_lstmkgqa_file(self):
"""
        Test the file-based prediction endpoint.
:return:
:rtype:
"""
url = f"{self.host_server}/api/predict_file"
params = {'data_apth': "./../data/QA_data/MetaQA/qa_test_1hop.txt"}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(params), timeout=360)
result = r.json()
print(result)
assert r.status_code == 200
assert result is not None, "返回结果为None"
#检查结果,里面肯定是字典格式
print("对文件接口测试完成")
def test_lstmkgqa(self):
"""
        Correct answers for the test data:
what does [Grégoire Colin] appear in Before the Rain
[Joe Thomas] appears in which movies The Inbetweeners Movie|The Inbetweeners 2
what films did [Michelle Trachtenberg] star in Inspector Gadget|Black Christmas|Ice Princess|Harriet the Spy|The Scribbler
what does [Helen Mack] star in The Son of Kong|Kiss and Make-Up|Divorce
        Test the prediction endpoint.
:return:
['问题是:what does Grégoire Colin appear in, 答案是: Before the Rain', '问题是:NE appears in which movies, 答案是: The Inbetweeners Movie', '问题是:what films did Michelle Trachtenberg star in, 答案是: Harriet the Spy', '问题是:what does <NAME> star in, 答案是: The Son of Kong', '问题是:what films did Shahid Kapoor act in, 答案是: Haider']
:rtype:
"""
url = f"{self.host_server}/api/predict"
data = [
['<NAME>', 'what does NE appear in'],
['<NAME>', 'NE appears in which movies'],
['<NAME>', 'what films did NE star in'],
['<NAME>', 'what does NE star in'],
['<NAME>', 'what films did NE act in'],
]
params = {'data':data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(params), timeout=360)
result = r.json()
print(result)
assert r.status_code == 200
        assert result is not None, "Returned result is None"
        # Check the result; it should be a dict
        print("Prediction interface test completed")
|
[
"json.dumps"
] |
[((674, 692), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (684, 692), False, 'import json\n'), ((2156, 2174), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (2166, 2174), False, 'import json\n')]
|
"""Trajectory Generator for in-place stepping motion for quadruped robot."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
TWO_PI = 2 * math.pi
def _get_actions_asymmetric_sine(phase, tg_params):
"""Returns the leg extension given current phase of TG and parameters.
Args:
phase: a number in [0, 2pi) representing current leg phase
tg_params: a dictionary of tg parameters:
stance_lift_cutoff -- switches the TG between stance (phase < cutoff) and
lift (phase > cutoff) phase
      amplitude_stance -- amplitude in stance phase
amplitude_lift -- amplitude in lift phase
center_extension -- center of leg extension
"""
stance_lift_cutoff = tg_params['stance_lift_cutoff']
a_prime = np.where(phase < stance_lift_cutoff, tg_params['amplitude_stance'],
tg_params['amplitude_lift'])
scaled_phase = np.where(
phase > stance_lift_cutoff, np.pi + (phase - stance_lift_cutoff) /
(TWO_PI - stance_lift_cutoff) * np.pi, phase / stance_lift_cutoff * np.pi)
return tg_params['center_extension'] + a_prime * np.sin(scaled_phase)
def step(current_phases, leg_frequencies, dt, tg_params):
"""Steps forward the in-place trajectory generator.
Args:
current_phases: phases of each leg.
leg_frequencies: the frequency to proceed the phase of each leg.
dt: amount of time (sec) between consecutive time steps.
tg_params: a set of parameters for trajectory generator, see the docstring
of "_get_actions_asymmetric_sine" for details.
Returns:
actions: leg swing/extensions as output by the trajectory generator.
new_state: new swing/extension.
"""
new_phases = np.fmod(current_phases + TWO_PI * leg_frequencies * dt, TWO_PI)
extensions = []
for leg_id in range(4):
extensions.append(
_get_actions_asymmetric_sine(new_phases[..., leg_id], tg_params))
return new_phases, extensions
def reset():
return np.array([0, np.pi * 0.5, np.pi, np.pi * 1.5])
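
# Illustrative usage sketch (not part of the original module). The tg_params
# values below are made-up placeholders that only satisfy the keys read by
# _get_actions_asymmetric_sine; a real controller would tune them.
if __name__ == '__main__':
  demo_params = {
      'stance_lift_cutoff': 0.6 * TWO_PI,
      'amplitude_stance': 0.05,
      'amplitude_lift': 0.15,
      'center_extension': 0.0,
  }
  phases = reset()
  for _ in range(3):
    phases, extensions = step(phases, leg_frequencies=np.full(4, 1.0),
                              dt=0.01, tg_params=demo_params)
    print(phases, extensions)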
|
[
"numpy.sin",
"numpy.where",
"numpy.array",
"numpy.fmod"
] |
[((843, 943), 'numpy.where', 'np.where', (['(phase < stance_lift_cutoff)', "tg_params['amplitude_stance']", "tg_params['amplitude_lift']"], {}), "(phase < stance_lift_cutoff, tg_params['amplitude_stance'],\n tg_params['amplitude_lift'])\n", (851, 943), True, 'import numpy as np\n'), ((978, 1132), 'numpy.where', 'np.where', (['(phase > stance_lift_cutoff)', '(np.pi + (phase - stance_lift_cutoff) / (TWO_PI - stance_lift_cutoff) * np.pi)', '(phase / stance_lift_cutoff * np.pi)'], {}), '(phase > stance_lift_cutoff, np.pi + (phase - stance_lift_cutoff) /\n (TWO_PI - stance_lift_cutoff) * np.pi, phase / stance_lift_cutoff * np.pi)\n', (986, 1132), True, 'import numpy as np\n'), ((1781, 1844), 'numpy.fmod', 'np.fmod', (['(current_phases + TWO_PI * leg_frequencies * dt)', 'TWO_PI'], {}), '(current_phases + TWO_PI * leg_frequencies * dt, TWO_PI)\n', (1788, 1844), True, 'import numpy as np\n'), ((2042, 2088), 'numpy.array', 'np.array', (['[0, np.pi * 0.5, np.pi, np.pi * 1.5]'], {}), '([0, np.pi * 0.5, np.pi, np.pi * 1.5])\n', (2050, 2088), True, 'import numpy as np\n'), ((1193, 1213), 'numpy.sin', 'np.sin', (['scaled_phase'], {}), '(scaled_phase)\n', (1199, 1213), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
import RPi.GPIO as GPIO
import time
import subprocess
import os
PIR_PIN = 11 # Physical (BOARD) pin 11, i.e. BCM GPIO17
GPIO.setmode(GPIO.BOARD) # Use header pin numbers
GPIO.setup(PIR_PIN, GPIO.IN)
running = False # Is a video currently playing?
player = "omxplayer" # The video player being used
video_path = "/home/pi/video.mp4" # Path to video file
child = 0
if player == "vlc":
opt = '--play-and-exit'
else:
opt = ''
try:
print("Waiting for motion")
while True:
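        # Note (added comment): motion is treated here as a LOW reading on the
        # PIR pin, i.e. an active-low sensor is assumed; common HC-SR501 modules
        # drive the pin HIGH on motion, which would need the inverse test.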
if not GPIO.input(PIR_PIN):
if running == False:
print("Motion detected")
child = subprocess.Popen([player, video_path, opt])
running = True
print("Playing video")
if running == True:
child.poll()
if child.returncode == 0:
running = False
print("Video complete, waiting for motion")
time.sleep(1)
except KeyboardInterrupt:
print("Quit")
GPIO.cleanup()
|
[
"RPi.GPIO.setmode",
"subprocess.Popen",
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"time.sleep",
"RPi.GPIO.input"
] |
[((170, 194), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (182, 194), True, 'import RPi.GPIO as GPIO\n'), ((227, 255), 'RPi.GPIO.setup', 'GPIO.setup', (['PIR_PIN', 'GPIO.IN'], {}), '(PIR_PIN, GPIO.IN)\n', (237, 255), True, 'import RPi.GPIO as GPIO\n'), ((1097, 1111), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (1109, 1111), True, 'import RPi.GPIO as GPIO\n'), ((577, 596), 'RPi.GPIO.input', 'GPIO.input', (['PIR_PIN'], {}), '(PIR_PIN)\n', (587, 596), True, 'import RPi.GPIO as GPIO\n'), ((1022, 1035), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1032, 1035), False, 'import time\n'), ((696, 739), 'subprocess.Popen', 'subprocess.Popen', (['[player, video_path, opt]'], {}), '([player, video_path, opt])\n', (712, 739), False, 'import subprocess\n')]
|
###
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from csv import DictWriter
from json import load as load_json
import logging
from io import StringIO
from time import sleep
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isb_cgc.settings")
import django
django.setup()
import click
from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper
logging.basicConfig(level=logging.INFO)
def run_query(project_id, provider, config):
job_reference = provider.submit_query_and_get_job_ref(project_id)
poll_retry_limit = provider.BQ_JOB_POLL_MAX_RETRIES
poll_sleep_time = provider.BQ_JOB_POLL_SLEEP_TIME
all_done = False
total_retries = 0
poll_count = 0
# Poll for completion
while all_done is False and total_retries < poll_retry_limit:
poll_count += 1
total_retries += 1
is_finished = provider.is_bigquery_job_finished(project_id)
all_done = is_finished
sleep(poll_sleep_time)
logging.debug("Done: {done} retry: {retry}".format(done=str(all_done), retry=total_retries))
query_result = provider.download_and_unpack_query_result()
return query_result
def load_config_from_path(config_class, config_json_path):
config_dict = load_json(open(config_json_path, 'r'))
return config_class.from_dict(config_dict)
def get_csv_object(data_rows, schema, include_header=False):
fieldnames = [x['name'] for x in schema]
file_obj = StringIO()
writer = DictWriter(file_obj, fieldnames=fieldnames)
if include_header:
writer.writeheader()
writer.writerows(data_rows)
return file_obj
def save_csv(data_rows, schema, csv_path, include_header=False):
file_obj = get_csv_object(data_rows, schema, include_header=include_header)
with open(csv_path, 'w') as file_handle:
file_handle.write(file_obj.getvalue())
@click.command()
@click.argument('data_type', type=str)
@click.option('--config_json', type=str)
@click.option('-chr', "chromosome_array", type=str, multiple=True, help="Chromosome (required for methylation)")
def print_query(data_type, config_json, chromosome_array):
feature_type = FeatureDataTypeHelper.get_type(data_type)
logging.info("Feature type: {}".format(str(feature_type)))
config_class = FeatureDataTypeHelper.get_feature_def_config_from_data_type(feature_type)
provider_class = FeatureDataTypeHelper.get_feature_def_provider_from_data_type(feature_type)
if config_json is not None:
config_instance = load_config_from_path(config_class, config_json)
else:
config_dict = FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type(feature_type)
config_instance = config_class.from_dict(config_dict)
if not chromosome_array:
chromosome_array = [str(c) for c in range(1, 23)]
chromosome_array.extend(['X', 'Y'])
provider = provider_class(config_instance, chromosome_array=chromosome_array)
query = provider.build_query(config_instance)
print(query)
# project_id: project number of the BQ data project (typically isb-cgc's project number)
# data_type: 4-letter data type code, eg. GNAB
@click.command()
@click.argument('project_id', type=click.INT)
@click.argument('data_type', type=str)
@click.argument('csv_path', type=str)
@click.option('--config_json', type=str)
@click.option('-chr', "chromosome_array", type=str, multiple=True, help="Chromosome (required for methylation)")
def run(project_id, data_type, csv_path, config_json, chromosome_array):
feature_type = FeatureDataTypeHelper.get_type(data_type)
logging.info("Feature type: {}".format(str(feature_type)))
config_class = FeatureDataTypeHelper.get_feature_def_config_from_data_type(feature_type)
provider_class = FeatureDataTypeHelper.get_feature_def_provider_from_data_type(feature_type)
if config_json is not None:
config_instance = load_config_from_path(config_class, config_json)
else:
config_dict = FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type(feature_type)
config_instance = config_class.from_dict(config_dict)
if not chromosome_array:
chromosome_array = [str(c) for c in range(1, 23)]
chromosome_array.extend(['X', 'Y'])
else:
chromosome_array = chromosome_array[0].split(",")
provider = provider_class(config_instance, chromosome_array=chromosome_array)
logging.info("Output CSV: {}".format(csv_path))
logging.info("Config: {}".format(str(config_instance)))
result = run_query(project_id, provider, config_instance)
save_csv(result, provider.get_mysql_schema(), csv_path, include_header=True)
@click.group()
def main():
pass
main.add_command(print_query)
main.add_command(run)
if __name__ == '__main__':
main()
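
# Example invocation (illustrative only; the script filename, project number and
# output path below are placeholders, not taken from this repository):
#   python feature_def_export.py run 123456789 GNAB /tmp/gnab.csv -chr 1 -chr 2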
|
[
"io.StringIO",
"bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_provider_from_data_type",
"os.environ.setdefault",
"django.setup",
"logging.basicConfig",
"click.argument",
"bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type",
"future.standard_library.install_aliases",
"bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_config_from_data_type",
"bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_type",
"click.option",
"click.command",
"time.sleep",
"builtins.str",
"click.group",
"builtins.range",
"csv.DictWriter"
] |
[((681, 715), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (713, 715), False, 'from future import standard_library\n'), ((903, 970), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""isb_cgc.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'isb_cgc.settings')\n", (924, 970), False, 'import os\n'), ((986, 1000), 'django.setup', 'django.setup', ([], {}), '()\n', (998, 1000), False, 'import django\n'), ((1087, 1126), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1106, 1126), False, 'import logging\n'), ((2583, 2598), 'click.command', 'click.command', ([], {}), '()\n', (2596, 2598), False, 'import click\n'), ((2600, 2637), 'click.argument', 'click.argument', (['"""data_type"""'], {'type': 'str'}), "('data_type', type=str)\n", (2614, 2637), False, 'import click\n'), ((2639, 2678), 'click.option', 'click.option', (['"""--config_json"""'], {'type': 'str'}), "('--config_json', type=str)\n", (2651, 2678), False, 'import click\n'), ((2680, 2796), 'click.option', 'click.option', (['"""-chr"""', '"""chromosome_array"""'], {'type': 'str', 'multiple': '(True)', 'help': '"""Chromosome (required for methylation)"""'}), "('-chr', 'chromosome_array', type=str, multiple=True, help=\n 'Chromosome (required for methylation)')\n", (2692, 2796), False, 'import click\n'), ((3877, 3892), 'click.command', 'click.command', ([], {}), '()\n', (3890, 3892), False, 'import click\n'), ((3894, 3938), 'click.argument', 'click.argument', (['"""project_id"""'], {'type': 'click.INT'}), "('project_id', type=click.INT)\n", (3908, 3938), False, 'import click\n'), ((3940, 3977), 'click.argument', 'click.argument', (['"""data_type"""'], {'type': 'str'}), "('data_type', type=str)\n", (3954, 3977), False, 'import click\n'), ((3979, 4015), 'click.argument', 'click.argument', (['"""csv_path"""'], {'type': 'str'}), "('csv_path', type=str)\n", (3993, 4015), False, 'import click\n'), ((4017, 4056), 'click.option', 'click.option', (['"""--config_json"""'], {'type': 'str'}), "('--config_json', type=str)\n", (4029, 4056), False, 'import click\n'), ((4058, 4174), 'click.option', 'click.option', (['"""-chr"""', '"""chromosome_array"""'], {'type': 'str', 'multiple': '(True)', 'help': '"""Chromosome (required for methylation)"""'}), "('-chr', 'chromosome_array', type=str, multiple=True, help=\n 'Chromosome (required for methylation)')\n", (4070, 4174), False, 'import click\n'), ((5388, 5401), 'click.group', 'click.group', ([], {}), '()\n', (5399, 5401), False, 'import click\n'), ((2169, 2179), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2177, 2179), False, 'from io import StringIO\n'), ((2193, 2236), 'csv.DictWriter', 'DictWriter', (['file_obj'], {'fieldnames': 'fieldnames'}), '(file_obj, fieldnames=fieldnames)\n', (2203, 2236), False, 'from csv import DictWriter\n'), ((2871, 2912), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_type', 'FeatureDataTypeHelper.get_type', (['data_type'], {}), '(data_type)\n', (2901, 2912), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((2995, 3068), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_config_from_data_type', 'FeatureDataTypeHelper.get_feature_def_config_from_data_type', (['feature_type'], {}), '(feature_type)\n', (3054, 3068), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((3090, 3165), 
'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_provider_from_data_type', 'FeatureDataTypeHelper.get_feature_def_provider_from_data_type', (['feature_type'], {}), '(feature_type)\n', (3151, 3165), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((4262, 4303), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_type', 'FeatureDataTypeHelper.get_type', (['data_type'], {}), '(data_type)\n', (4292, 4303), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((4386, 4459), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_config_from_data_type', 'FeatureDataTypeHelper.get_feature_def_config_from_data_type', (['feature_type'], {}), '(feature_type)\n', (4445, 4459), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((4481, 4556), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_provider_from_data_type', 'FeatureDataTypeHelper.get_feature_def_provider_from_data_type', (['feature_type'], {}), '(feature_type)\n', (4542, 4556), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((1669, 1691), 'time.sleep', 'sleep', (['poll_sleep_time'], {}), '(poll_sleep_time)\n', (1674, 1691), False, 'from time import sleep\n'), ((3306, 3397), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type', 'FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type', (['feature_type'], {}), '(\n feature_type)\n', (3378, 3397), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((4697, 4788), 'bq_data_access.v2.feature_id_utils.FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type', 'FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type', (['feature_type'], {}), '(\n feature_type)\n', (4769, 4788), False, 'from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper\n'), ((2956, 2973), 'builtins.str', 'str', (['feature_type'], {}), '(feature_type)\n', (2959, 2973), False, 'from builtins import str\n'), ((3513, 3519), 'builtins.str', 'str', (['c'], {}), '(c)\n', (3516, 3519), False, 'from builtins import str\n'), ((4347, 4364), 'builtins.str', 'str', (['feature_type'], {}), '(feature_type)\n', (4350, 4364), False, 'from builtins import str\n'), ((4904, 4910), 'builtins.str', 'str', (['c'], {}), '(c)\n', (4907, 4910), False, 'from builtins import str\n'), ((5219, 5239), 'builtins.str', 'str', (['config_instance'], {}), '(config_instance)\n', (5222, 5239), False, 'from builtins import str\n'), ((1756, 1769), 'builtins.str', 'str', (['all_done'], {}), '(all_done)\n', (1759, 1769), False, 'from builtins import str\n'), ((3529, 3541), 'builtins.range', 'range', (['(1)', '(23)'], {}), '(1, 23)\n', (3534, 3541), False, 'from builtins import range\n'), ((4920, 4932), 'builtins.range', 'range', (['(1)', '(23)'], {}), '(1, 23)\n', (4925, 4932), False, 'from builtins import range\n')]
|
import abna
import json
import settings
class ABNClient:
mutations = None
new_last_transaction = None
FILENAME = "last_transactions.json"
def __init__(self):
self.sess = abna.Session(settings.ABNA_ACCOUNT)
self.sess.login(settings.ABNA_PASSNUMBER, settings.ABNA_PASSWORD)
self.last_transactions = self.get_last_transaction_timestamp()
def get_mutations(self, iban):
mutations = self.sess.mutations(iban)
return self.get_only_new_mutations(iban, mutations)
def get_only_new_mutations(self, iban, mutations):
result = []
last_transaction_timestamp = int(self.last_transactions.get(iban, 0))
new_last_transaction = 0
for mutation in mutations['mutationsList']['mutations']:
transaction_timestamp = int(mutation['mutation']['transactionTimestamp'])
if transaction_timestamp > new_last_transaction:
new_last_transaction = transaction_timestamp
if transaction_timestamp > last_transaction_timestamp:
result.append(mutation['mutation'])
self.last_transactions[iban] = new_last_transaction
return result
def save_last_transaction_timestamp(self):
with open(self.FILENAME, 'w') as f:
json.dump(self.last_transactions, f)
def get_last_transaction_timestamp(self):
try:
with open(self.FILENAME, 'r') as f:
data = json.load(f)
return data
except FileNotFoundError:
return {}
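
# Illustrative usage sketch (not part of the original file). The IBAN below is a
# placeholder, and settings.ABNA_ACCOUNT / ABNA_PASSNUMBER / ABNA_PASSWORD must
# already be configured as the class expects.
if __name__ == '__main__':
    client = ABNClient()
    new_mutations = client.get_mutations('NL00ABNA0123456789')
    print('%d new mutations' % len(new_mutations))
    client.save_last_transaction_timestamp()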
|
[
"json.dump",
"abna.Session",
"json.load"
] |
[((197, 232), 'abna.Session', 'abna.Session', (['settings.ABNA_ACCOUNT'], {}), '(settings.ABNA_ACCOUNT)\n', (209, 232), False, 'import abna\n'), ((1285, 1321), 'json.dump', 'json.dump', (['self.last_transactions', 'f'], {}), '(self.last_transactions, f)\n', (1294, 1321), False, 'import json\n'), ((1453, 1465), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1462, 1465), False, 'import json\n')]
|
# Adapted from:
# https://www.analyticsvidhya.com/blog/2016/08/beginners-guide-to-topic-modeling-in-python/
import read_bibtex
import os, shutil
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
import string
import gensim
from gensim import corpora
from gensim.test.utils import datapath
import numpy as np
stop = set(stopwords.words('english'))
stop.add("exist")
stop.add("because")
stop.add("via")
stop.add("interest")
stop.add("therefore")
stop.add("hence")
stop.add("this")
exclude = set(string.punctuation)
exclude.add("-")
exclude.add("_")
exclude.add(".")
exclude.add(";")
lemma = WordNetLemmatizer()
stemmer = PorterStemmer()
ntopics = 30
npasses = 400
result_dir="doc_results_all_500_30"
model_dir="model_all_500_30"
year_from=1980
# Creating the object for LDA model using gensim library
Lda = gensim.models.ldamodel.LdaModel
def clean(doc):
punc_free = ''.join(ch for ch in doc if ch not in exclude)
lemmatized = " ".join(lemma.lemmatize(word)+" " for word in punc_free.lower().split())
stemmed = " ".join(stemmer.stem(word) for word in lemmatized.split())
stop_free = " ".join([i for i in stemmed.split() if i not in stop])
return stop_free
def main():
if result_dir in os.listdir("."): shutil.rmtree("./"+result_dir)
os.mkdir("./"+result_dir)
# Read and clean data
doc_set = read_bibtex.bibtex_tostring_from(year_from)
doc_clean = [clean(doc).split() for doc in doc_set]
# Creating the term dictionary of our courpus, where every unique term is assigned an index.
dictionary = corpora.Dictionary(doc_clean)
# Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.
doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
# Loading the LDA model
ldamodel = Lda.load("./"+model_dir+"/all")
# Infer topic distribution for each doc
topic_dist = [ldamodel.get_document_topics(dictionary.doc2bow(doc)) for doc in doc_clean]
# Save results
np.save("./"+result_dir+"/all", np.array(topic_dist))
dist_array = np.array(topic_dist)
    transpose_array = [[] for x in range(ntopics)]
for itr in range(len(dist_array)):
for top, weight in dist_array[itr]:
transpose_array[top].append((itr, weight))
for row in transpose_array:
row.sort(key=lambda x: x[1], reverse=True)
np.save("./"+result_dir+"/all_transpose", np.array(transpose_array))
main()
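
# Illustrative follow-up sketch (not part of the original script): reading back
# the per-topic document ranking saved above. allow_pickle is needed because the
# saved object array holds Python tuples of (document index, weight).
def show_top_documents(topic_id, k=5):
    transpose = np.load("./" + result_dir + "/all_transpose.npy", allow_pickle=True)
    return transpose[topic_id][:k]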
|
[
"os.mkdir",
"shutil.rmtree",
"nltk.stem.porter.PorterStemmer",
"gensim.corpora.Dictionary",
"numpy.array",
"nltk.corpus.stopwords.words",
"nltk.stem.wordnet.WordNetLemmatizer",
"read_bibtex.bibtex_tostring_from",
"os.listdir"
] |
[((670, 689), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (687, 689), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((700, 715), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (713, 715), False, 'from nltk.stem.porter import PorterStemmer\n'), ((397, 423), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (412, 423), False, 'from nltk.corpus import stopwords\n'), ((1343, 1370), 'os.mkdir', 'os.mkdir', (["('./' + result_dir)"], {}), "('./' + result_dir)\n", (1351, 1370), False, 'import os, shutil\n'), ((1410, 1453), 'read_bibtex.bibtex_tostring_from', 'read_bibtex.bibtex_tostring_from', (['year_from'], {}), '(year_from)\n', (1442, 1453), False, 'import read_bibtex\n'), ((1625, 1654), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (['doc_clean'], {}), '(doc_clean)\n', (1643, 1654), False, 'from gensim import corpora\n'), ((2144, 2164), 'numpy.array', 'np.array', (['topic_dist'], {}), '(topic_dist)\n', (2152, 2164), True, 'import numpy as np\n'), ((1291, 1306), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (1301, 1306), False, 'import os, shutil\n'), ((1308, 1340), 'shutil.rmtree', 'shutil.rmtree', (["('./' + result_dir)"], {}), "('./' + result_dir)\n", (1321, 1340), False, 'import os, shutil\n'), ((2103, 2123), 'numpy.array', 'np.array', (['topic_dist'], {}), '(topic_dist)\n', (2111, 2123), True, 'import numpy as np\n'), ((2485, 2510), 'numpy.array', 'np.array', (['transpose_array'], {}), '(transpose_array)\n', (2493, 2510), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.contrib import slim
import tensorflow.contrib.layers as layers
from ..configuration import *
from .text_classification_train import main
class ModelSimple(object):
"""
Base class to create models for text classification. It uses several layers of GRU cells.
"""
def model(self, input_text_begin, input_text_end, gene, variation, num_output_classes,
batch_size, embeddings, training=True, dropout=TC_MODEL_DROPOUT):
"""
Creates a model for text classification
:param tf.Tensor input_text: the input data, the text as
[batch_size, text_vector_max_length, embeddings_size]
:param int num_output_classes: the number of output classes for the classifier
:param int batch_size: batch size, the same used in the dataset
:param List[List[float]] embeddings: a matrix with the embeddings for the embedding lookup
:param int num_hidden: number of hidden GRU cells in every layer
:param int num_layers: number of layers of the model
:param float dropout: dropout value between layers
:param boolean training: whether the model is built for training or not
:return Dict[str,tf.Tensor]: a dict with logits and prediction tensors
"""
input_text_begin = tf.reshape(input_text_begin, [batch_size, MAX_WORDS])
if input_text_end is not None:
input_text_end = tf.reshape(input_text_end, [batch_size, MAX_WORDS])
embedded_sequence_begin, sequence_length_begin, \
embedded_sequence_end, sequence_length_end, \
gene, variation = \
self.model_embedded_sequence(embeddings, input_text_begin, input_text_end, gene,
variation)
_, max_length, _ = tf.unstack(tf.shape(embedded_sequence_begin))
with tf.variable_scope('text_begin'):
output_begin = self.rnn(embedded_sequence_begin, sequence_length_begin, max_length,
dropout, batch_size, training)
if input_text_end is not None:
with tf.variable_scope('text_end'):
output_end = self.rnn(embedded_sequence_end, sequence_length_end, max_length,
dropout, batch_size, training)
output = tf.concat([output_begin, output_end], axis=1)
else:
output = output_begin
# full connected layer
logits = self.model_fully_connected(output, gene, variation, num_output_classes, dropout,
training)
prediction = tf.nn.softmax(logits)
return {
'logits' : logits,
'prediction': prediction,
}
def rnn(self, sequence, sequence_length, max_length, dropout, batch_size, training,
num_hidden=TC_MODEL_HIDDEN, num_layers=TC_MODEL_LAYERS):
# Recurrent network.
cells = []
for _ in range(num_layers):
cell = tf.nn.rnn_cell.GRUCell(num_hidden)
if training:
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=dropout)
cells.append(cell)
network = tf.nn.rnn_cell.MultiRNNCell(cells)
type = sequence.dtype
sequence_output, _ = tf.nn.dynamic_rnn(network, sequence, dtype=tf.float32,
sequence_length=sequence_length,
initial_state=network.zero_state(batch_size, type))
# get last output of the dynamic_rnn
sequence_output = tf.reshape(sequence_output, [batch_size * max_length, num_hidden])
indexes = tf.range(batch_size) * max_length + (sequence_length - 1)
output = tf.gather(sequence_output, indexes)
return output
def model_fully_connected(self, output, gene, variation, num_output_classes, dropout, training):
output = layers.dropout(output, keep_prob=dropout, is_training=training)
net = tf.concat([output, gene, variation], axis=1)
net = layers.fully_connected(net, 128, activation_fn=tf.nn.relu)
net = layers.dropout(net, keep_prob=dropout, is_training=training)
logits = layers.fully_connected(net, num_output_classes, activation_fn=None)
return logits
def remove_padding(self, input_text):
# calculate max length of the input_text
mask = tf.greater_equal(input_text, 0) # true for words false for padding
sequence_length = tf.reduce_sum(tf.cast(mask, tf.int32), 1)
# truncate the input text to max length
max_sequence_length = tf.reduce_max(sequence_length)
input_text_length = tf.shape(input_text)[1]
empty_padding_lenght = input_text_length - max_sequence_length
input_text, _ = tf.split(input_text, [max_sequence_length, empty_padding_lenght], axis=1)
return input_text, sequence_length
def model_embedded_sequence(self, embeddings, input_text_begin, input_text_end, gene,
variation):
"""
Given the embeddings and the input text returns the embedded sequence and
the sequence length. The input_text is truncated to the max length of the sequence, so
the output embedded_sequence wont have the same shape as input_text or even a constant shape
:param embeddings:
:param input_text:
:return: (embedded_sequence, sequence_length)
"""
input_text_begin, sequence_length_begin = self.remove_padding(input_text_begin)
if input_text_end is not None:
input_text_end, sequence_length_end = self.remove_padding(input_text_end)
else:
sequence_length_end = None
variation, variation_length = self.remove_padding(variation)
# create the embeddings
# first vector is a zeros vector used for padding
embeddings_dimension = len(embeddings[0])
embeddings = [[0.0] * embeddings_dimension] + embeddings
embeddings = tf.constant(embeddings, name='embeddings', dtype=tf.float32)
# this means we need to add 1 to the input_text
input_text_begin = tf.add(input_text_begin, 1)
if input_text_end is not None:
input_text_end = tf.add(input_text_end, 1)
gene = tf.add(gene, 1)
variation = tf.add(variation, 1)
embedded_sequence_begin = tf.nn.embedding_lookup(embeddings, input_text_begin)
if input_text_end is not None:
embedded_sequence_end = tf.nn.embedding_lookup(embeddings, input_text_end)
else:
embedded_sequence_end = None
embedded_gene = tf.nn.embedding_lookup(embeddings, gene)
embedded_gene = tf.squeeze(embedded_gene, axis=1)
embedded_variation = tf.nn.embedding_lookup(embeddings, variation)
embedded_variation = tf.reduce_mean(embedded_variation, axis=1)
return embedded_sequence_begin, sequence_length_begin, \
embedded_sequence_end, sequence_length_end, \
embedded_gene, embedded_variation
def model_arg_scope(self, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
with slim.arg_scope([slim.batch_norm],
decay=batch_norm_decay,
epsilon=batch_norm_epsilon,
activation_fn=None) as scope:
return scope
def targets(self, labels, output_classes):
"""
Transform a vector of labels into a matrix of one hot encoding labels
:param tf.Tensor labels: an array of labels with dimension [batch_size]
:param int output_classes: the total number of output classes
:return tf.Tensor: a tensorflow tensor
"""
targets = tf.one_hot(labels, axis=-1, depth=output_classes, on_value=1.0, off_value=0.0)
targets = tf.squeeze(targets, axis=1)
return targets
def loss(self, targets, graph_data):
"""
Calculates the softmax cross entropy loss
:param tf.Tensor logits: logits output of the model
:param tf.Tensor targets: targets with the one hot encoding labels
:return tf.Tensor : a tensor with the loss value
"""
logits = graph_data['logits']
loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
return tf.reduce_mean(loss)
def optimize(self, loss, global_step,
learning_rate_initial=TC_LEARNING_RATE_INITIAL,
learning_rate_decay=TC_LEARNING_RATE_DECAY,
learning_rate_decay_steps=TC_LEARNING_RATE_DECAY_STEPS):
"""
Creates a learning rate and an optimizer for the loss
:param tf.Tensor loss: the tensor with the loss of the model
:param tf.Tensor global_step: the global step for training
:param int learning_rate_initial: the initial learning rate
:param int learning_rate_decay: the decay of the learning rate
:param int learning_rate_decay_steps: the number of steps to decay the learning rate
:return (tf.Tensor, tf.Tensor): a tuple with the optimizer and the learning rate
"""
learning_rate = tf.train.exponential_decay(learning_rate_initial, global_step,
learning_rate_decay_steps,
learning_rate_decay,
staircase=True, name='learning_rate')
# optimizer
optimizer = tf.train.RMSPropOptimizer(learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# optimizer = tf.train.AdamOptimizer(learning_rate)
optimizer = optimizer.minimize(loss, global_step=global_step)
return optimizer, learning_rate
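
# Illustrative sketch (added for clarity, not part of the original model): shows
# on a toy batch what remove_padding() computes -- per-example lengths from the
# mask of non-padding ids (>= 0) and truncation to the longest sequence in the
# batch. Assumes TensorFlow 1.x, consistent with the tf.contrib imports above.
def _demo_remove_padding():
    input_text = tf.constant([[4, 7, -1, -1],
                              [3, -1, -1, -1]])
    mask = tf.greater_equal(input_text, 0)                       # True for real words
    sequence_length = tf.reduce_sum(tf.cast(mask, tf.int32), 1)  # [2, 1]
    max_sequence_length = tf.reduce_max(sequence_length)       # 2
    padding = tf.shape(input_text)[1] - max_sequence_length
    truncated, _ = tf.split(input_text, [max_sequence_length, padding], axis=1)
    with tf.Session() as sess:
        print(sess.run([sequence_length, truncated]))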
if __name__ == '__main__':
main(ModelSimple(), 'simple', batch_size=TC_BATCH_SIZE)
|
[
"tensorflow.reshape",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.greater_equal",
"tensorflow.reduce_max",
"tensorflow.split",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.gather",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.dropout",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.range",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.nn.embedding_lookup",
"tensorflow.add",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.nn.rnn_cell.MultiRNNCell",
"tensorflow.train.exponential_decay",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.shape"
] |
[((1325, 1378), 'tensorflow.reshape', 'tf.reshape', (['input_text_begin', '[batch_size, MAX_WORDS]'], {}), '(input_text_begin, [batch_size, MAX_WORDS])\n', (1335, 1378), True, 'import tensorflow as tf\n'), ((2638, 2659), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2651, 2659), True, 'import tensorflow as tf\n'), ((3219, 3253), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['cells'], {}), '(cells)\n', (3246, 3253), True, 'import tensorflow as tf\n'), ((3619, 3685), 'tensorflow.reshape', 'tf.reshape', (['sequence_output', '[batch_size * max_length, num_hidden]'], {}), '(sequence_output, [batch_size * max_length, num_hidden])\n', (3629, 3685), True, 'import tensorflow as tf\n'), ((3779, 3814), 'tensorflow.gather', 'tf.gather', (['sequence_output', 'indexes'], {}), '(sequence_output, indexes)\n', (3788, 3814), True, 'import tensorflow as tf\n'), ((3956, 4019), 'tensorflow.contrib.layers.dropout', 'layers.dropout', (['output'], {'keep_prob': 'dropout', 'is_training': 'training'}), '(output, keep_prob=dropout, is_training=training)\n', (3970, 4019), True, 'import tensorflow.contrib.layers as layers\n'), ((4034, 4078), 'tensorflow.concat', 'tf.concat', (['[output, gene, variation]'], {'axis': '(1)'}), '([output, gene, variation], axis=1)\n', (4043, 4078), True, 'import tensorflow as tf\n'), ((4093, 4151), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['net', '(128)'], {'activation_fn': 'tf.nn.relu'}), '(net, 128, activation_fn=tf.nn.relu)\n', (4115, 4151), True, 'import tensorflow.contrib.layers as layers\n'), ((4166, 4226), 'tensorflow.contrib.layers.dropout', 'layers.dropout', (['net'], {'keep_prob': 'dropout', 'is_training': 'training'}), '(net, keep_prob=dropout, is_training=training)\n', (4180, 4226), True, 'import tensorflow.contrib.layers as layers\n'), ((4244, 4311), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['net', 'num_output_classes'], {'activation_fn': 'None'}), '(net, num_output_classes, activation_fn=None)\n', (4266, 4311), True, 'import tensorflow.contrib.layers as layers\n'), ((4441, 4472), 'tensorflow.greater_equal', 'tf.greater_equal', (['input_text', '(0)'], {}), '(input_text, 0)\n', (4457, 4472), True, 'import tensorflow as tf\n'), ((4656, 4686), 'tensorflow.reduce_max', 'tf.reduce_max', (['sequence_length'], {}), '(sequence_length)\n', (4669, 4686), True, 'import tensorflow as tf\n'), ((4834, 4907), 'tensorflow.split', 'tf.split', (['input_text', '[max_sequence_length, empty_padding_lenght]'], {'axis': '(1)'}), '(input_text, [max_sequence_length, empty_padding_lenght], axis=1)\n', (4842, 4907), True, 'import tensorflow as tf\n'), ((6060, 6120), 'tensorflow.constant', 'tf.constant', (['embeddings'], {'name': '"""embeddings"""', 'dtype': 'tf.float32'}), "(embeddings, name='embeddings', dtype=tf.float32)\n", (6071, 6120), True, 'import tensorflow as tf\n'), ((6204, 6231), 'tensorflow.add', 'tf.add', (['input_text_begin', '(1)'], {}), '(input_text_begin, 1)\n', (6210, 6231), True, 'import tensorflow as tf\n'), ((6341, 6356), 'tensorflow.add', 'tf.add', (['gene', '(1)'], {}), '(gene, 1)\n', (6347, 6356), True, 'import tensorflow as tf\n'), ((6377, 6397), 'tensorflow.add', 'tf.add', (['variation', '(1)'], {}), '(variation, 1)\n', (6383, 6397), True, 'import tensorflow as tf\n'), ((6432, 6484), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'input_text_begin'], {}), '(embeddings, input_text_begin)\n', (6454, 6484), True, 'import tensorflow as 
tf\n'), ((6690, 6730), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'gene'], {}), '(embeddings, gene)\n', (6712, 6730), True, 'import tensorflow as tf\n'), ((6755, 6788), 'tensorflow.squeeze', 'tf.squeeze', (['embedded_gene'], {'axis': '(1)'}), '(embedded_gene, axis=1)\n', (6765, 6788), True, 'import tensorflow as tf\n'), ((6818, 6863), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'variation'], {}), '(embeddings, variation)\n', (6840, 6863), True, 'import tensorflow as tf\n'), ((6893, 6935), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['embedded_variation'], {'axis': '(1)'}), '(embedded_variation, axis=1)\n', (6907, 6935), True, 'import tensorflow as tf\n'), ((7798, 7876), 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'axis': '(-1)', 'depth': 'output_classes', 'on_value': '(1.0)', 'off_value': '(0.0)'}), '(labels, axis=-1, depth=output_classes, on_value=1.0, off_value=0.0)\n', (7808, 7876), True, 'import tensorflow as tf\n'), ((7895, 7922), 'tensorflow.squeeze', 'tf.squeeze', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (7905, 7922), True, 'import tensorflow as tf\n'), ((8307, 8377), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'targets', 'logits': 'logits'}), '(labels=targets, logits=logits)\n', (8346, 8377), True, 'import tensorflow as tf\n'), ((8393, 8413), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (8407, 8413), True, 'import tensorflow as tf\n'), ((9224, 9381), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate_initial', 'global_step', 'learning_rate_decay_steps', 'learning_rate_decay'], {'staircase': '(True)', 'name': '"""learning_rate"""'}), "(learning_rate_initial, global_step,\n learning_rate_decay_steps, learning_rate_decay, staircase=True, name=\n 'learning_rate')\n", (9250, 9381), True, 'import tensorflow as tf\n'), ((9566, 9606), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (9591, 9606), True, 'import tensorflow as tf\n'), ((1447, 1498), 'tensorflow.reshape', 'tf.reshape', (['input_text_end', '[batch_size, MAX_WORDS]'], {}), '(input_text_end, [batch_size, MAX_WORDS])\n', (1457, 1498), True, 'import tensorflow as tf\n'), ((1822, 1855), 'tensorflow.shape', 'tf.shape', (['embedded_sequence_begin'], {}), '(embedded_sequence_begin)\n', (1830, 1855), True, 'import tensorflow as tf\n'), ((1871, 1902), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""text_begin"""'], {}), "('text_begin')\n", (1888, 1902), True, 'import tensorflow as tf\n'), ((2338, 2383), 'tensorflow.concat', 'tf.concat', (['[output_begin, output_end]'], {'axis': '(1)'}), '([output_begin, output_end], axis=1)\n', (2347, 2383), True, 'import tensorflow as tf\n'), ((3025, 3059), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['num_hidden'], {}), '(num_hidden)\n', (3047, 3059), True, 'import tensorflow as tf\n'), ((4549, 4572), 'tensorflow.cast', 'tf.cast', (['mask', 'tf.int32'], {}), '(mask, tf.int32)\n', (4556, 4572), True, 'import tensorflow as tf\n'), ((4715, 4735), 'tensorflow.shape', 'tf.shape', (['input_text'], {}), '(input_text)\n', (4723, 4735), True, 'import tensorflow as tf\n'), ((6300, 6325), 'tensorflow.add', 'tf.add', (['input_text_end', '(1)'], {}), '(input_text_end, 1)\n', (6306, 6325), True, 'import tensorflow as tf\n'), ((6560, 6610), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 
'input_text_end'], {}), '(embeddings, input_text_end)\n', (6582, 6610), True, 'import tensorflow as tf\n'), ((7208, 7318), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.batch_norm]'], {'decay': 'batch_norm_decay', 'epsilon': 'batch_norm_epsilon', 'activation_fn': 'None'}), '([slim.batch_norm], decay=batch_norm_decay, epsilon=\n batch_norm_epsilon, activation_fn=None)\n', (7222, 7318), False, 'from tensorflow.contrib import slim\n'), ((2123, 2152), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""text_end"""'], {}), "('text_end')\n", (2140, 2152), True, 'import tensorflow as tf\n'), ((3108, 3169), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['cell'], {'output_keep_prob': 'dropout'}), '(cell, output_keep_prob=dropout)\n', (3137, 3169), True, 'import tensorflow as tf\n'), ((3704, 3724), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (3712, 3724), True, 'import tensorflow as tf\n')]
|
import urllib.request as req
import ventura._hpath as hpath
import os
def download(url, file_name):
response = req.urlopen(url)
# Create folders if need
hpath.create_dir(file_name)
file = open(file_name,'wb')
file.write(response.read())
file.close()
def get_page(url):
response = req.urlopen(url)
return response.read().decode("utf8")
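
# Illustrative usage (not part of the original module); the URL and local path
# are placeholders.
if __name__ == '__main__':
    download('https://example.com/logo.png', 'downloads/logo.png')
    print(get_page('https://example.com')[:200])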
|
[
"ventura._hpath.create_dir",
"urllib.request.urlopen"
] |
[((113, 129), 'urllib.request.urlopen', 'req.urlopen', (['url'], {}), '(url)\n', (124, 129), True, 'import urllib.request as req\n'), ((159, 186), 'ventura._hpath.create_dir', 'hpath.create_dir', (['file_name'], {}), '(file_name)\n', (175, 186), True, 'import ventura._hpath as hpath\n'), ((294, 310), 'urllib.request.urlopen', 'req.urlopen', (['url'], {}), '(url)\n', (305, 310), True, 'import urllib.request as req\n')]
|
import os
from tsserver import app
def get_upload_dir():
return os.path.join(app.root_path, app.config['PHOTOS_UPLOAD_FOLDER'])
|
[
"os.path.join"
] |
[((71, 134), 'os.path.join', 'os.path.join', (['app.root_path', "app.config['PHOTOS_UPLOAD_FOLDER']"], {}), "(app.root_path, app.config['PHOTOS_UPLOAD_FOLDER'])\n", (83, 134), False, 'import os\n')]
|
from django.shortcuts import render, HttpResponseRedirect
from django.urls import reverse
import Store.views
from Store.models.productModel import *
from .forms import CustomerPaymentForm
from .models import *
from Store.models.productModel import CustomerOrders
def cart_view(request):
try:
        the_id = request.session['cart_id'] # get the cart id from the session
cart = Cart.objects.get(id=the_id)
except:
the_id = None
if the_id is None:
empty_message = "Your cart is empty, please keep shopping"
context = {"empty": True, 'empty_message': empty_message}
else:
        new_total = 0.00 # total price
for item in cart.cartitem_set.all():
            line_total = float(item.product.price) * item.quantity # price * quantity
new_total += line_total
request.session['items_total'] = cart.cartitem_set.count()
        cart.total = new_total # total price
cart.save()
context = {"cart": cart}
template = "cart.html"
return render(request, template, context)
def add_to_cart(request, pk):
try:
the_id = request.session['cart_id']
except:
new_cart = Cart()
new_cart.save()
request.session['cart_id'] = new_cart.id
the_id = new_cart.id
cart = Cart.objects.get(id=the_id)
try:
product = Product.objects.get(id=pk)
except Product.DoesNotExist:
pass
except:
pass
if request.method == 'POST':
if product.quantity == 0:
lista_attesa = WaitingListModel.objects.create(product=product, user=request.user)
lista_attesa.save()
return render(request, 'finished_perfumes.html', {'product': product})
elif int(request.POST['qty']) > product.quantity:
return render(request, 'finished_perfumes.html', {'product': product})
else:
            qty = request.POST['qty'] # quantity of this perfume being added to the cart
cart_item = CartItem.objects.create(cart=cart, product=product)
cart_item.quantity = qty
cart_item.save()
return HttpResponseRedirect(reverse("carts:cart_view"))
return HttpResponseRedirect(reverse("carts:cart_view"))
def remove_from_cart(request, id):
try:
the_id = request.session['cart_id']
cart = Cart.objects.get(id=the_id)
except:
the_id = None
return HttpResponseRedirect(reverse("carts:cart_view"))
cartitem = CartItem.objects.get(id=id)
cartitem.delete()
return HttpResponseRedirect(reverse("carts:cart_view"))
def customer_payment(request):
if request.method == 'POST':
form = CustomerPaymentForm(request.POST, instance=request.user)
if not form.is_valid():
return render(request, 'payment.html', {'form': form})
else:
form.save()
cartitem = CartItem.objects.all()
for item in cartitem:
orderdetail = CustomerOrders(user=request.user, product=item.product)
orderdetail.save()
item.product.quantity -= item.quantity
                # update the stock quantity in the database
Product.objects.filter(id=item.product.pk).update(quantity=item.product.quantity)
            cartitem.delete() # the cart is emptied once payment is completed
request.session['items_total'] = 0
template = "success_payment.html"
context = {"empty": True, 'form': form, 'cartitem': cartitem}
return render(request, template, context)
form = CustomerPaymentForm()
return render(request, 'payment.html', {'form': form})
def success_payment(request):
return render(request, 'success_payment.html')
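
# Illustrative URLconf sketch (not part of this file): the reverse() calls above
# assume an app namespace "carts" with a route named "cart_view"; the remaining
# route names and paths below are guesses shown only for orientation.
#
# from django.urls import path
# from . import views
#
# app_name = 'carts'
# urlpatterns = [
#     path('', views.cart_view, name='cart_view'),
#     path('add/<int:pk>/', views.add_to_cart, name='add_to_cart'),
#     path('remove/<int:id>/', views.remove_from_cart, name='remove_from_cart'),
#     path('payment/', views.customer_payment, name='customer_payment'),
#     path('payment/success/', views.success_payment, name='success_payment'),
# ]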
|
[
"django.shortcuts.render",
"Store.models.productModel.CustomerOrders",
"django.urls.reverse"
] |
[((1033, 1067), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (1039, 1067), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((3646, 3693), 'django.shortcuts.render', 'render', (['request', '"""payment.html"""', "{'form': form}"], {}), "(request, 'payment.html', {'form': form})\n", (3652, 3693), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((3737, 3776), 'django.shortcuts.render', 'render', (['request', '"""success_payment.html"""'], {}), "(request, 'success_payment.html')\n", (3743, 3776), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((2225, 2251), 'django.urls.reverse', 'reverse', (['"""carts:cart_view"""'], {}), "('carts:cart_view')\n", (2232, 2251), False, 'from django.urls import reverse\n'), ((2582, 2608), 'django.urls.reverse', 'reverse', (['"""carts:cart_view"""'], {}), "('carts:cart_view')\n", (2589, 2608), False, 'from django.urls import reverse\n'), ((1674, 1737), 'django.shortcuts.render', 'render', (['request', '"""finished_perfumes.html"""', "{'product': product}"], {}), "(request, 'finished_perfumes.html', {'product': product})\n", (1680, 1737), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((2164, 2190), 'django.urls.reverse', 'reverse', (['"""carts:cart_view"""'], {}), "('carts:cart_view')\n", (2171, 2190), False, 'from django.urls import reverse\n'), ((2799, 2846), 'django.shortcuts.render', 'render', (['request', '"""payment.html"""', "{'form': form}"], {}), "(request, 'payment.html', {'form': form})\n", (2805, 2846), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((3566, 3600), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (3572, 3600), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((1816, 1879), 'django.shortcuts.render', 'render', (['request', '"""finished_perfumes.html"""', "{'product': product}"], {}), "(request, 'finished_perfumes.html', {'product': product})\n", (1822, 1879), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((2456, 2482), 'django.urls.reverse', 'reverse', (['"""carts:cart_view"""'], {}), "('carts:cart_view')\n", (2463, 2482), False, 'from django.urls import reverse\n'), ((2995, 3050), 'Store.models.productModel.CustomerOrders', 'CustomerOrders', ([], {'user': 'request.user', 'product': 'item.product'}), '(user=request.user, product=item.product)\n', (3009, 3050), False, 'from Store.models.productModel import CustomerOrders\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='GoogleCivicCandidateCampaign',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')),
('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)),
('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)),
('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)),
('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')),
('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)),
('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')),
('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),
('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)),
('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)),
('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)),
('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)),
('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)),
('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)),
('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)),
('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)),
('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
migrations.CreateModel(
name='GoogleCivicContestOffice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('office', models.CharField(max_length=254, verbose_name=b'google civic office')),
('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)),
('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),
('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)),
('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote for', blank=True)),
('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will be elected', blank=True)),
('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)),
('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)),
('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)),
('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)),
('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')),
('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')),
('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')),
('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
migrations.CreateModel(
name='GoogleCivicContestReferendum',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')),
('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')),
('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')),
('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')),
('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),
('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)),
('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')),
('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')),
('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')),
('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
migrations.CreateModel(
name='GoogleCivicElection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')),
('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)),
('name', models.CharField(max_length=254, verbose_name=b'google civic election name')),
('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')),
('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.URLField",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] |
[((320, 413), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (336, 413), False, 'from django.db import models, migrations\n'), ((437, 514), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic candidate name'"}), "(max_length=254, verbose_name=b'google civic candidate name')\n", (453, 514), False, 'from django.db import models, migrations\n'), ((543, 639), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic party', blank=True)\n", (559, 639), False, 'from django.db import models, migrations\n'), ((667, 766), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic photoUrl'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic photoUrl', blank=True)\n", (683, 766), False, 'from django.db import models, migrations\n'), ((800, 906), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic order on ballot'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic order on ballot', blank=True)\n", (816, 906), False, 'from django.db import models, migrations\n'), ((955, 1057), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic internal temp contest_office_id id'"}), "(max_length=254, verbose_name=\n b'google civic internal temp contest_office_id id')\n", (971, 1057), False, 'from django.db import models, migrations\n'), ((1101, 1207), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote contest_office_id id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote contest_office_id id', blank=True)\n", (1117, 1207), False, 'from django.db import models, migrations\n'), ((1250, 1318), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google election id'"}), "(max_length=254, verbose_name=b'google election id')\n", (1266, 1318), False, 'from django.db import models, migrations\n'), ((1361, 1458), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote election id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote election id', blank=True)\n", (1377, 1458), False, 'from django.db import models, migrations\n'), ((1506, 1613), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote candidate campaign id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote candidate campaign id', blank=True)\n", (1522, 1613), False, 'from django.db import models, migrations\n'), ((1653, 1752), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote politician id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote politician id', blank=True)\n", (1669, 1752), False, 'from django.db import models, migrations\n'), ((1784, 1878), 
'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)', 'verbose_name': "b'website url of candidate campaign'", 'blank': '(True)'}), "(null=True, verbose_name=\n b'website url of candidate campaign', blank=True)\n", (1799, 1878), False, 'from django.db import models, migrations\n'), ((1909, 2004), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)', 'verbose_name': "b'facebook url of candidate campaign'", 'blank': '(True)'}), "(null=True, verbose_name=\n b'facebook url of candidate campaign', blank=True)\n", (1924, 2004), False, 'from django.db import models, migrations\n'), ((2034, 2128), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)', 'verbose_name': "b'twitter url of candidate campaign'", 'blank': '(True)'}), "(null=True, verbose_name=\n b'twitter url of candidate campaign', blank=True)\n", (2049, 2128), False, 'from django.db import models, migrations\n'), ((2162, 2260), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)', 'verbose_name': "b'google plus url of candidate campaign'", 'blank': '(True)'}), "(null=True, verbose_name=\n b'google plus url of candidate campaign', blank=True)\n", (2177, 2260), False, 'from django.db import models, migrations\n'), ((2290, 2384), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)', 'verbose_name': "b'youtube url of candidate campaign'", 'blank': '(True)'}), "(null=True, verbose_name=\n b'youtube url of candidate campaign', blank=True)\n", (2305, 2384), False, 'from django.db import models, migrations\n'), ((2408, 2523), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic candidate campaign email'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic candidate campaign email', blank=True)\n", (2424, 2523), False, 'from django.db import models, migrations\n'), ((2547, 2662), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic candidate campaign email'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic candidate campaign email', blank=True)\n", (2563, 2662), False, 'from django.db import models, migrations\n'), ((2694, 2765), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': "b'is primary election'"}), "(default=False, verbose_name=b'is primary election')\n", (2713, 2765), False, 'from django.db import models, migrations\n'), ((2915, 3008), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (2931, 3008), False, 'from django.db import models, migrations\n'), ((3034, 3103), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic office'"}), "(max_length=254, verbose_name=b'google civic office')\n", (3050, 3103), False, 'from django.db import models, migrations\n'), ((3151, 3253), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic election id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic election id', blank=True)\n", (3167, 3253), False, 'from django.db import models, migrations\n'), ((3291, 3388), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote election id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote election id', blank=True)\n", (3307, 3388), False, 'from django.db import models, migrations\n'), ((3432, 3535), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote contest office id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote contest office id', blank=True)\n", (3448, 3535), False, 'from django.db import models, migrations\n'), ((3571, 3694), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic number of candidates to vote for'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic number of candidates to vote for', blank=True)\n", (3587, 3694), False, 'from django.db import models, migrations\n'), ((3727, 3858), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic number of candidates who will be elected'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic number of candidates who will be elected', blank=True)\n", (3743, 3858), False, 'from django.db import models, migrations\n'), ((3891, 3997), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic level, option 0'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic level, option 0', blank=True)\n", (3907, 3997), False, 'from django.db import models, migrations\n'), ((4030, 4136), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic level, option 1'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic level, option 1', blank=True)\n", (4046, 4136), False, 'from django.db import models, migrations\n'), ((4169, 4275), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic level, option 2'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic level, option 2', blank=True)\n", (4185, 4275), False, 'from django.db import models, migrations\n'), ((4310, 4417), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic ballot placement'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic ballot placement', blank=True)\n", (4326, 4417), False, 'from django.db import models, migrations\n'), ((4449, 4553), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic primary party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic primary party', blank=True)\n", (4465, 4553), False, 'from django.db import models, migrations\n'), ((4585, 4661), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic district name'"}), "(max_length=254, verbose_name=b'google civic district name')\n", (4601, 4661), False, 'from django.db import models, migrations\n'), ((4699, 4776), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic district scope'"}), 
"(max_length=254, verbose_name=b'google civic district scope')\n", (4715, 4776), False, 'from django.db import models, migrations\n'), ((4815, 4893), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic district ocd id'"}), "(max_length=254, verbose_name=b'google civic district ocd id')\n", (4831, 4893), False, 'from django.db import models, migrations\n'), ((4942, 5046), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic primary party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic primary party', blank=True)\n", (4958, 5046), False, 'from django.db import models, migrations\n'), ((5072, 5176), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic primary party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic primary party', blank=True)\n", (5088, 5176), False, 'from django.db import models, migrations\n'), ((5208, 5279), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': "b'is primary election'"}), "(default=False, verbose_name=b'is primary election')\n", (5227, 5279), False, 'from django.db import models, migrations\n'), ((5433, 5526), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (5449, 5526), False, 'from django.db import models, migrations\n'), ((5562, 5641), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic referendum title'"}), "(max_length=254, verbose_name=b'google civic referendum title')\n", (5578, 5641), False, 'from django.db import models, migrations\n'), ((5684, 5771), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic referendum subtitle'"}), "(max_length=254, verbose_name=\n b'google civic referendum subtitle')\n", (5700, 5771), False, 'from django.db import models, migrations\n'), ((5804, 5905), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic referendum details url'"}), "(max_length=254, null=True, verbose_name=\n b'google civic referendum details url')\n", (5820, 5905), False, 'from django.db import models, migrations\n'), ((5948, 6022), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic election id'"}), "(max_length=254, verbose_name=b'google civic election id')\n", (5964, 6022), False, 'from django.db import models, migrations\n'), ((6065, 6162), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'we vote election id'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'we vote election id', blank=True)\n", (6081, 6162), False, 'from django.db import models, migrations\n'), ((6197, 6304), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic ballot placement'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic ballot placement', blank=True)\n", (6213, 6304), False, 'from django.db import models, migrations\n'), 
((6336, 6440), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic primary party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic primary party', blank=True)\n", (6352, 6440), False, 'from django.db import models, migrations\n'), ((6472, 6548), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic district name'"}), "(max_length=254, verbose_name=b'google civic district name')\n", (6488, 6548), False, 'from django.db import models, migrations\n'), ((6586, 6663), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic district scope'"}), "(max_length=254, verbose_name=b'google civic district scope')\n", (6602, 6663), False, 'from django.db import models, migrations\n'), ((6702, 6780), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic district ocd id'"}), "(max_length=254, verbose_name=b'google civic district ocd id')\n", (6718, 6780), False, 'from django.db import models, migrations\n'), ((6829, 6933), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic primary party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic primary party', blank=True)\n", (6845, 6933), False, 'from django.db import models, migrations\n'), ((6959, 7063), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'verbose_name': "b'google civic primary party'", 'blank': '(True)'}), "(max_length=254, null=True, verbose_name=\n b'google civic primary party', blank=True)\n", (6975, 7063), False, 'from django.db import models, migrations\n'), ((7095, 7166), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': "b'is primary election'"}), "(default=False, verbose_name=b'is primary election')\n", (7114, 7166), False, 'from django.db import models, migrations\n'), ((7311, 7404), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (7327, 7404), False, 'from django.db import models, migrations\n'), ((7448, 7539), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(20)', 'verbose_name': "b'google civic election id'"}), "(unique=True, max_length=20, verbose_name=\n b'google civic election id')\n", (7464, 7539), False, 'from django.db import models, migrations\n'), ((7577, 7686), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)', 'null': '(True)', 'verbose_name': "b'we vote election id'", 'blank': '(True)'}), "(max_length=20, unique=True, null=True, verbose_name=\n b'we vote election id', blank=True)\n", (7593, 7686), False, 'from django.db import models, migrations\n'), ((7709, 7785), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic election name'"}), "(max_length=254, verbose_name=b'google civic election name')\n", (7725, 7785), False, 'from django.db import models, migrations\n'), ((7821, 7896), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'verbose_name': "b'google civic election day'"}), 
"(max_length=254, verbose_name=b'google civic election day')\n", (7837, 7896), False, 'from django.db import models, migrations\n'), ((7933, 8004), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': "b'is primary election'"}), "(default=False, verbose_name=b'is primary election')\n", (7952, 8004), False, 'from django.db import models, migrations\n')]
|
import unittest
from unittest.mock import patch
from nose.tools import eq_
from buoy.client.network import ip
class MockResponse:
def __init__(self, **kwargs):
self.content = str.encode(kwargs.pop('content', ""))
self.status_code = kwargs.pop('status_code', 404)
class TestPublicIP(unittest.TestCase):
def setUp(self):
self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com',
'https://api.ipify1.org', 'http://ip.42.pl/raw']
@patch.object(ip, 'get')
def test_get_public_ip_return_ip_in_last_service(self, mock_method):
service_ok = self.services[-1]
max_attempts = len(self.services)
ip_expected = "172.16.58.3"
def mocked_requests_get(*args, **kwargs):
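            # Descriptive note: only the last service in self.services answers with
            # HTTP 200 and the expected IP; every other URL gets the default 404
            # MockResponse, so get_public_ip has to walk through all of them.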
mock_resp = MockResponse()
if args[0] == service_ok:
mock_resp = MockResponse(content=ip_expected, status_code=200)
return mock_resp
mock_method.side_effect = mocked_requests_get
eq_(ip_expected, ip.get_public_ip(services=self.services))
eq_(mock_method.call_count, max_attempts)
@patch.object(ip, 'get')
def test_get_public_ip_return_ip_in_first_service(self, mock_method):
service_ok = self.services[0]
max_attempts = 1
ip_expected = "172.16.58.3"
def mocked_requests_get(*args, **kwargs):
mock_resp = MockResponse()
if args[0] == service_ok:
mock_resp = MockResponse(content=ip_expected, status_code=200)
return mock_resp
mock_method.side_effect = mocked_requests_get
eq_(ip_expected, ip.get_public_ip(services=self.services))
eq_(mock_method.call_count, max_attempts)
@patch.object(ip, 'get')
def test_get_public_ip_return_exception(self, mock_method):
max_attempts = len(self.services)
def mocked_requests_get(*args, **kwargs):
return MockResponse()
mock_method.side_effect = mocked_requests_get
self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services)
eq_(mock_method.call_count, max_attempts)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"unittest.mock.patch.object",
"nose.tools.eq_",
"buoy.client.network.ip.get_public_ip"
] |
[((518, 541), 'unittest.mock.patch.object', 'patch.object', (['ip', '"""get"""'], {}), "(ip, 'get')\n", (530, 541), False, 'from unittest.mock import patch\n'), ((1148, 1171), 'unittest.mock.patch.object', 'patch.object', (['ip', '"""get"""'], {}), "(ip, 'get')\n", (1160, 1171), False, 'from unittest.mock import patch\n'), ((1761, 1784), 'unittest.mock.patch.object', 'patch.object', (['ip', '"""get"""'], {}), "(ip, 'get')\n", (1773, 1784), False, 'from unittest.mock import patch\n'), ((2201, 2216), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2214, 2216), False, 'import unittest\n'), ((1100, 1141), 'nose.tools.eq_', 'eq_', (['mock_method.call_count', 'max_attempts'], {}), '(mock_method.call_count, max_attempts)\n', (1103, 1141), False, 'from nose.tools import eq_\n'), ((1713, 1754), 'nose.tools.eq_', 'eq_', (['mock_method.call_count', 'max_attempts'], {}), '(mock_method.call_count, max_attempts)\n', (1716, 1754), False, 'from nose.tools import eq_\n'), ((2126, 2167), 'nose.tools.eq_', 'eq_', (['mock_method.call_count', 'max_attempts'], {}), '(mock_method.call_count, max_attempts)\n', (2129, 2167), False, 'from nose.tools import eq_\n'), ((1050, 1090), 'buoy.client.network.ip.get_public_ip', 'ip.get_public_ip', ([], {'services': 'self.services'}), '(services=self.services)\n', (1066, 1090), False, 'from buoy.client.network import ip\n'), ((1663, 1703), 'buoy.client.network.ip.get_public_ip', 'ip.get_public_ip', ([], {'services': 'self.services'}), '(services=self.services)\n', (1679, 1703), False, 'from buoy.client.network import ip\n')]
|
import json
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
from sklearn.preprocessing import Imputer, StandardScaler
import DataSource
import os.path
class NYK(DataSource.DataSource):
def __init__(self, app, dsrc_name='', dsrc_type='csv', dsrc_path='data/', file_name='', header_rows=None, date_cols=None, skip_rows=None, lat1=None, long1=None, lat2=None, long2=None):
DataSource.DataSource.__init__(self, app, dsrc_name)
self.dsrc_type = dsrc_type
self.dsrc_path = dsrc_path
self.file_name = file_name
self.header_rows = header_rows
self.date_cols = date_cols
self.skip_rows = skip_rows
self.lat1 = lat1
self.long1 = long1
self.lat2 = lat2
self.long2 = long2
self.read_prepare_data()
self.init_dsrc()
"""These methods are fine-tuned for the current data sets. I need to
generalize them once I know more about different types of data coming
in"""
@classmethod
def clean(cls, df, name):
"""Find all empty space or all NaN columns and drops them from the DataFrame"""
df.replace(r'\s+', np.nan, regex=True, inplace=True)
df.replace(r'-', np.nan, regex=True, inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.columns = [str(x) for x in df.columns]
df.reset_index(level=[0], inplace=True)
df.rename(columns={'index': 'ind'}, inplace=True)
"""This is to find coordinate columns etc. manually, because we don't
know anything about the structure of our data!"""
# df.to_csv('data/'+name+'_clean.csv')
return df
@classmethod
def scale_impute(cls, df, method):
"""Find float columns, impute their NaN values with 'method', and then min-max scale the column/feature"""
fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1)
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform(
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])]
)
scaler = StandardScaler()
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform(
df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])]
)
return df
@classmethod
def convert_coordinate(cls, df, col_in, col_out):
"""Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd"""
##FIXME! This is assuming all coordinates are E and N
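        # Worked example: 4807.038 ([d]ddmm.mmm) -> 48 + 7.038/60 ≈ 48.1173 decimal degrees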
df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int)
return df
@classmethod
def wgs84_to_web_mercator(cls, df, lon, lat):
"""Convert decimal longitude/latitude to Web Mercator format"""
k = 6378137
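        # k is the WGS84 semi-major axis (equatorial Earth radius) in metres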
df['wm%s'%lon] = df[lon] * (k * np.pi/180.0)
df['wm%s'%lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k
return df
def read_prepare_data(self):
"""Use all data tools above to deliver the final cleaned DataFrame"""
self.data = self.dsrc_types[self.dsrc_type](
os.path.join(self.dsrc_path, self.file_name),
header = self.header_rows,
parse_dates = self.date_cols,
skiprows = self.skip_rows,
error_bad_lines = False,
low_memory = False
)
self.data['timestamp2'] = pd.to_datetime(self.data[0])
self.data['timestamp1'] = pd.to_datetime(self.data[1])
self.clean(self.data, self.dsrc_name)
self.convert_coordinate(self.data, str(self.lat1), 'lat1')
self.convert_coordinate(self.data, str(self.long1), 'long1')
self.convert_coordinate(self.data, str(self.lat2), 'lat2')
self.convert_coordinate(self.data, str(self.long2), 'long2')
self.scale_impute(self.data, 'mean')
self.wgs84_to_web_mercator(self.data, 'long1', 'lat1')
self.wgs84_to_web_mercator(self.data, 'long2', 'lat2')
self.data['timestamp_date'] = self.data['timestamp1'].dt.strftime('%Y-%m-%d')
DataSource.DataSource.types['NYK'] = NYK
|
[
"DataSource.DataSource.__init__",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.Imputer",
"numpy.tan",
"pandas.to_datetime"
] |
[((417, 469), 'DataSource.DataSource.__init__', 'DataSource.DataSource.__init__', (['self', 'app', 'dsrc_name'], {}), '(self, app, dsrc_name)\n', (447, 469), False, 'import DataSource\n'), ((1863, 1918), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': 'np.nan', 'strategy': 'method', 'axis': '(1)'}), '(missing_values=np.nan, strategy=method, axis=1)\n', (1870, 1918), False, 'from sklearn.preprocessing import Imputer, StandardScaler\n'), ((2182, 2198), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2196, 2198), False, 'from sklearn.preprocessing import Imputer, StandardScaler\n'), ((3579, 3607), 'pandas.to_datetime', 'pd.to_datetime', (['self.data[0]'], {}), '(self.data[0])\n', (3593, 3607), True, 'import pandas as pd\n'), ((3642, 3670), 'pandas.to_datetime', 'pd.to_datetime', (['self.data[1]'], {}), '(self.data[1])\n', (3656, 3670), True, 'import pandas as pd\n'), ((3055, 3093), 'numpy.tan', 'np.tan', (['((90 + df[lat]) * np.pi / 360.0)'], {}), '((90 + df[lat]) * np.pi / 360.0)\n', (3061, 3093), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Created on 05/07/2018
# @author: <NAME>
# @license: MIT-license
# Purpose: example of multiple agent flocking behavior
# Explanation:
import pygame, sys, random, math
pygame.init()
stopped = False
window_height = 800
window_width = 600
black = (0,0,0)
white = (255,255,255)
class agent:
def __init__(self, x, y):
self.x = x
self.y = y
self.velocityX = 10
self.velocityY = 10
while not stopped:
ev = pygame.event.get()
for event in ev:
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
                stopped = False
|
[
"pygame.event.get",
"pygame.mouse.get_pos",
"pygame.init"
] |
[((195, 208), 'pygame.init', 'pygame.init', ([], {}), '()\n', (206, 208), False, 'import pygame, sys, random, math\n'), ((471, 489), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (487, 489), False, 'import pygame, sys, random, math\n'), ((579, 601), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (599, 601), False, 'import pygame, sys, random, math\n')]
|
import numpy as np
import torch
from .base_wrapper import BaseWrapper
from torch.autograd.functional import hvp, vhp, hessian
from typing import List, Tuple, Dict, Union, Callable
from torch import nn, Tensor
class TorchWrapper(BaseWrapper):
def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'):
self.func = func
# Not very clean...
if 'device' in dir(func):
self.device = func.device
else:
self.device = torch.device(device)
if precision == 'float32':
self.precision = torch.float32
elif precision == 'float64':
self.precision = torch.float64
else:
raise ValueError
self.hvp_func = hvp if hvp_type == 'hvp' else vhp
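        # Note: torch.autograd.functional.hvp and vhp both return
        # (function value, product with v); for a symmetric Hessian the results
        # match, and vhp is usually the cheaper of the two, hence the default.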
def get_value_and_grad(self, input_var):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = self._unconcat(torch.tensor(
input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes)
loss = self._eval_func(input_var_)
input_var_grad = input_var_.values() if isinstance(
input_var_, dict) else input_var_
grads = torch.autograd.grad(loss, input_var_grad)
if isinstance(input_var_, dict):
grads = {k: v for k, v in zip(input_var_.keys(), grads)}
return [loss.cpu().detach().numpy().astype(np.float64),
self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)]
def get_hvp(self, input_var, vector):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = self._unconcat(torch.tensor(
input_var, dtype=self.precision, device=self.device), self.shapes)
vector_ = self._unconcat(torch.tensor(
vector, dtype=self.precision, device=self.device), self.shapes)
if isinstance(input_var_, dict):
input_var_ = tuple(input_var_.values())
if isinstance(vector_, dict):
vector_ = tuple(vector_.values())
if isinstance(input_var_, list):
input_var_ = tuple(input_var_)
if isinstance(vector_, list):
vector_ = tuple(vector_)
loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_)
return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64)
def get_hess(self, input_var):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = torch.tensor(
input_var, dtype=self.precision, device=self.device)
def func(inp):
return self._eval_func(self._unconcat(inp, self.shapes))
hess = hessian(func, input_var_, vectorize=False)
return hess.cpu().detach().numpy().astype(np.float64)
def get_ctr_jac(self, input_var):
assert 'shapes' in dir(
self), 'You must first call get input to define the tensors shapes.'
input_var_ = self._unconcat(torch.tensor(
input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes)
ctr_val = self._eval_ctr_func(input_var_)
input_var_grad = input_var_.values() if isinstance(
input_var_, dict) else input_var_
grads = torch.autograd.grad(ctr_val, input_var_grad)
return grads.cpu().detach().numpy().astype(np.float64)
def _reshape(self, t, sh):
if torch.is_tensor(t):
return t.reshape(sh)
elif isinstance(t, np.ndarray):
return np.reshape(t, sh)
else:
raise NotImplementedError
def _tconcat(self, t_list, dim=0):
if torch.is_tensor(t_list[0]):
return torch.cat(t_list, dim)
elif isinstance(t_list[0], np.ndarray):
return np.concatenate(t_list, dim)
else:
raise NotImplementedError
def _gather(self, t, i, j):
if isinstance(t, np.ndarray) or torch.is_tensor(t):
return t[i:j]
else:
raise NotImplementedError
def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None):
"""
A factory to create a function of the torch parameter model.
:param model: torch model
    :type model: torch.nn.Module
:param loss: a function with signature loss_value = loss(pred_y, true_y).
:type loss: function
:param train_x: dataset used as input of the model
:type train_x: np.ndarray
:param train_y: dataset used as ground truth input of the loss
:type train_y: np.ndarray
:return: (function of the parameters, list of parameters, names of parameters)
:rtype: tuple
"""
# named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()}
params, names = extract_weights(model)
device = params[0].device
prec_ = torch.float32 if precision == 'float32' else torch.float64
if isinstance(train_x, np.ndarray):
train_x = torch.tensor(train_x, dtype=prec_, device=device)
if isinstance(train_y, np.ndarray):
train_y = torch.tensor(train_y, dtype=prec_, device=device)
def func(*new_params):
load_weights(model, {k: v for k, v in zip(names, new_params)})
out = apply_func(model, train_x)
return loss(out, train_y)
func.device = device
return func, [p.cpu().detach().numpy() for p in params], names
def apply_func(func, input_):
if isinstance(input_, dict):
return func(**input_)
elif isinstance(input_, list) or isinstance(input_, tuple):
return func(*input_)
else:
return func(input_)
# Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py
# Utilities to make nn.Module "functional"
# In particular the goal is to be able to provide a function that takes as input
# the parameters and evaluate the nn.Module using fixed inputs.
def _del_nested_attr(obj: nn.Module, names: List[str]) -> None:
"""
Deletes the attribute specified by the given list of names.
For example, to delete the attribute obj.conv.weight,
use _del_nested_attr(obj, ['conv', 'weight'])
"""
if len(names) == 1:
delattr(obj, names[0])
else:
_del_nested_attr(getattr(obj, names[0]), names[1:])
def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:
"""
Set the attribute specified by the given list of names to value.
For example, to set the attribute obj.conv.weight,
use _del_nested_attr(obj, ['conv', 'weight'], value)
"""
if len(names) == 1:
setattr(obj, names[0], value)
else:
_set_nested_attr(getattr(obj, names[0]), names[1:], value)
def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]:
"""
This function removes all the Parameters from the model and
return them as a tuple as well as their original attribute names.
The weights must be re-loaded with `load_weights` before the model
can be used again.
Note that this function modifies the model in place and after this
call, mod.parameters() will be empty.
"""
orig_params = [p for p in mod.parameters() if p.requires_grad]
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
if p.requires_grad:
_del_nested_attr(mod, name.split("."))
names.append(name)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in orig_params)
return params, names
def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None:
"""
Reload a set of weights so that `mod` can be used again to perform a forward pass.
Note that the `params` are regular Tensors (that can have history) and so are left
as Tensors. This means that mod.parameters() will still be empty after this call.
"""
for name, p in params.items():
_set_nested_attr(mod, name.split("."), p)
|
[
"torch.autograd.functional.hessian",
"numpy.concatenate",
"torch.autograd.grad",
"torch.cat",
"numpy.reshape",
"torch.device",
"torch.is_tensor",
"torch.tensor"
] |
[((1248, 1289), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'input_var_grad'], {}), '(loss, input_var_grad)\n', (1267, 1289), False, 'import torch\n'), ((2621, 2686), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'device': 'self.device'}), '(input_var, dtype=self.precision, device=self.device)\n', (2633, 2686), False, 'import torch\n'), ((2809, 2851), 'torch.autograd.functional.hessian', 'hessian', (['func', 'input_var_'], {'vectorize': '(False)'}), '(func, input_var_, vectorize=False)\n', (2816, 2851), False, 'from torch.autograd.functional import hvp, vhp, hessian\n'), ((3390, 3434), 'torch.autograd.grad', 'torch.autograd.grad', (['ctr_val', 'input_var_grad'], {}), '(ctr_val, input_var_grad)\n', (3409, 3434), False, 'import torch\n'), ((3542, 3560), 'torch.is_tensor', 'torch.is_tensor', (['t'], {}), '(t)\n', (3557, 3560), False, 'import torch\n'), ((3775, 3801), 'torch.is_tensor', 'torch.is_tensor', (['t_list[0]'], {}), '(t_list[0])\n', (3790, 3801), False, 'import torch\n'), ((5094, 5143), 'torch.tensor', 'torch.tensor', (['train_x'], {'dtype': 'prec_', 'device': 'device'}), '(train_x, dtype=prec_, device=device)\n', (5106, 5143), False, 'import torch\n'), ((5202, 5251), 'torch.tensor', 'torch.tensor', (['train_y'], {'dtype': 'prec_', 'device': 'device'}), '(train_y, dtype=prec_, device=device)\n', (5214, 5251), False, 'import torch\n'), ((491, 511), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (503, 511), False, 'import torch\n'), ((969, 1059), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'requires_grad': '(True)', 'device': 'self.device'}), '(input_var, dtype=self.precision, requires_grad=True, device=\n self.device)\n', (981, 1059), False, 'import torch\n'), ((1741, 1806), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'device': 'self.device'}), '(input_var, dtype=self.precision, device=self.device)\n', (1753, 1806), False, 'import torch\n'), ((1867, 1929), 'torch.tensor', 'torch.tensor', (['vector'], {'dtype': 'self.precision', 'device': 'self.device'}), '(vector, dtype=self.precision, device=self.device)\n', (1879, 1929), False, 'import torch\n'), ((3104, 3194), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'requires_grad': '(True)', 'device': 'self.device'}), '(input_var, dtype=self.precision, requires_grad=True, device=\n self.device)\n', (3116, 3194), False, 'import torch\n'), ((3822, 3844), 'torch.cat', 'torch.cat', (['t_list', 'dim'], {}), '(t_list, dim)\n', (3831, 3844), False, 'import torch\n'), ((4065, 4083), 'torch.is_tensor', 'torch.is_tensor', (['t'], {}), '(t)\n', (4080, 4083), False, 'import torch\n'), ((3654, 3671), 'numpy.reshape', 'np.reshape', (['t', 'sh'], {}), '(t, sh)\n', (3664, 3671), True, 'import numpy as np\n'), ((3912, 3939), 'numpy.concatenate', 'np.concatenate', (['t_list', 'dim'], {}), '(t_list, dim)\n', (3926, 3939), True, 'import numpy as np\n')]
|
import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee
from corehq.apps.unicel.api import UnicelBackend
logger = logging.getLogger('accounting')
class Command(LabelCommand):
help = "bootstrap Unicel gateway fees"
args = ""
label = ""
def handle(self, *labels, **options):
SmsGatewayFee.create_new(UnicelBackend.get_api_id(), INCOMING, 0.50,
currency=Currency.objects.get(code="INR"))
SmsGatewayFee.create_new(UnicelBackend.get_api_id(), OUTGOING, 0.50,
currency=Currency.objects.get(code="INR"))
logger.info("Updated Unicel gateway fees.")
|
[
"corehq.apps.accounting.models.Currency.objects.get",
"logging.getLogger",
"corehq.apps.unicel.api.UnicelBackend.get_api_id"
] |
[((291, 322), 'logging.getLogger', 'logging.getLogger', (['"""accounting"""'], {}), "('accounting')\n", (308, 322), False, 'import logging\n'), ((502, 528), 'corehq.apps.unicel.api.UnicelBackend.get_api_id', 'UnicelBackend.get_api_id', ([], {}), '()\n', (526, 528), False, 'from corehq.apps.unicel.api import UnicelBackend\n'), ((655, 681), 'corehq.apps.unicel.api.UnicelBackend.get_api_id', 'UnicelBackend.get_api_id', ([], {}), '()\n', (679, 681), False, 'from corehq.apps.unicel.api import UnicelBackend\n'), ((588, 620), 'corehq.apps.accounting.models.Currency.objects.get', 'Currency.objects.get', ([], {'code': '"""INR"""'}), "(code='INR')\n", (608, 620), False, 'from corehq.apps.accounting.models import Currency\n'), ((741, 773), 'corehq.apps.accounting.models.Currency.objects.get', 'Currency.objects.get', ([], {'code': '"""INR"""'}), "(code='INR')\n", (761, 773), False, 'from corehq.apps.accounting.models import Currency\n')]
|
import os
import os.path
from inspect import getmembers
import manhole
from .log_init import init_logging
from .web import Web
from ..core.playbooks.playbooks_event_handler_impl import PlaybooksEventHandlerImpl
from .. import api as robusta_api
from .config_loader import ConfigLoader
from ..model.config import Registry
def main():
init_logging()
registry = Registry()
event_handler = PlaybooksEventHandlerImpl(registry)
loader = ConfigLoader(registry, event_handler)
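    # Optional debugging aid: manhole opens an interactive shell over a UNIX
    # socket, here pre-loaded with the public robusta API members.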
if os.environ.get("ENABLE_MANHOLE", "false").lower() == "true":
manhole.install(locals=dict(getmembers(robusta_api)))
Web.init(event_handler)
Web.run() # blocking
loader.close()
if __name__ == "__main__":
main()
|
[
"os.environ.get",
"inspect.getmembers"
] |
[((495, 536), 'os.environ.get', 'os.environ.get', (['"""ENABLE_MANHOLE"""', '"""false"""'], {}), "('ENABLE_MANHOLE', 'false')\n", (509, 536), False, 'import os\n'), ((592, 615), 'inspect.getmembers', 'getmembers', (['robusta_api'], {}), '(robusta_api)\n', (602, 615), False, 'from inspect import getmembers\n')]
|
from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from .serializers import TagSerializer, IngredientSerializer, RecipeSerializer
from core.models import Tag, Ingredient, Recipe
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base class for a user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
qs = super(BaseRecipeAttrViewSet, self).get_queryset()
return qs.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
serializer_class = TagSerializer
queryset = Tag.objects.all()
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
serializer_class = IngredientSerializer
queryset = Ingredient.objects.all()
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage Recipe in the database"""
serializer_class = RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
    # Must override because the base class's get_queryset orders by '-name'
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
qs = super(RecipeViewSet, self).get_queryset()
return qs.filter(user=self.request.user)
|
[
"core.models.Ingredient.objects.all",
"core.models.Recipe.objects.all",
"core.models.Tag.objects.all"
] |
[((1090, 1107), 'core.models.Tag.objects.all', 'Tag.objects.all', ([], {}), '()\n', (1105, 1107), False, 'from core.models import Tag, Ingredient, Recipe\n'), ((1263, 1287), 'core.models.Ingredient.objects.all', 'Ingredient.objects.all', ([], {}), '()\n', (1285, 1287), False, 'from core.models import Tag, Ingredient, Recipe\n'), ((1430, 1450), 'core.models.Recipe.objects.all', 'Recipe.objects.all', ([], {}), '()\n', (1448, 1450), False, 'from core.models import Tag, Ingredient, Recipe\n')]
|
""" Serializers used by the sample app """
from authentic.entities import Account
from protean.context import context
from protean_flask.core.serializers import EntitySerializer
from protean_flask.core.serializers import ma
from .entities import Human
class AccountSerializer(EntitySerializer):
""" Serializer for Account Entity"""
id = ma.fields.Integer()
class Meta:
entity = Account
fields = ('id', 'name', 'username', 'email', 'title', 'phone',
'timezone', 'is_locked', 'is_active', 'is_verified')
class HumanSerializer(EntitySerializer):
""" Serializer for Human Entity"""
current_account = ma.fields.Method('get_current_account')
def get_current_account(self, obj):
""" Return the current logged in user """
if context.account:
return context.account.id
else:
return None
class Meta:
entity = Human
|
[
"protean_flask.core.serializers.ma.fields.Integer",
"protean_flask.core.serializers.ma.fields.Method"
] |
[((348, 367), 'protean_flask.core.serializers.ma.fields.Integer', 'ma.fields.Integer', ([], {}), '()\n', (365, 367), False, 'from protean_flask.core.serializers import ma\n'), ((656, 695), 'protean_flask.core.serializers.ma.fields.Method', 'ma.fields.Method', (['"""get_current_account"""'], {}), "('get_current_account')\n", (672, 695), False, 'from protean_flask.core.serializers import ma\n')]
|
#### NOT WORKING
import logging
import configs
import tweepy
import pymongo
import json
# TWITTER PARAMS
HASHTAGS_LIST = configs.HASHTAGS_LIST
# MONGODB PARAMS
MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS
MONGO_COL_USER = configs.MONGO_COL_USER
MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS
MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE
def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user):
logging.info("entrando na funcao get_user_locale_info")
    tweet_col = db_connection[mongo_col_tweets]
user_data_list = []
user_data = {}
user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1})
for user in user_list:
user_data['hashtag'] = user['hashtag']
user_data['user'] = user['user']
user_data['lang'] = user['lang']
user_data_list.append(user_data)
user_data = {}
user_data = user_data_list
    user_locale_data = get_user_info(api, db_connection, mongo_col_user, user_data)
return(user_locale_data)
def get_user_info(api, db_connection, mongo_col_user, user_list):
user_col = db_connection[mongo_col_user]
logging.info("entrando na funcao get_user_info")
filtered_user_list = []
insert_ids = []
for user in user_list:
logging.info("looking up for user {0}".format(user['user']))
user_raw = api.get_user(screen_name=user['user'])
user_raw_json = user_raw._json
user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']}
        # x = user_col.insert_many(user_locale_list)
        filtered_user_list.append(user_filtered)
return(filtered_user_list)
def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list):
user_col = db_connection[mongo_col_user]
count_documents = user_col.count()
if not count_documents == 0:
logging.info("Collection \"{0}\" is not empty. Performing cleanup".format(mongo_col_user))
clean_collection = configs.cleanup_collection(db_connection, mongo_col_user)
logging.info("Collection cleanup: {0} documents were deleted from the collection.".format(clean_collection))
x = user_col.insert_many(user_locale_list)
return(len(x.inserted_ids))
# def group_tweets_by_tag(api, db_connection, hashtags_list):
# print()
# def get_locale_by_tag(api, db_connection, hashtags_list):
# print()
#### NOT WORKING
# def main():
# logging.info("Collecting lang/locale count, per tag, for the given hashtags: {0}".format(' '.join(HASHTAGS_LIST)))
# api_auth = configs.twitter_auth()
# mongodb_connection = configs.mongodb_connect()
# user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS)
# insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list)
# tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST)
# locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST)
# logging.info("Lang/Locale count per tag stored into the collection \"{0}\"".format(MONGO_COL_LOCALE))
# if __name__ == "__main__":
# configs.logging_basic_config()
# main()
|
[
"logging.info",
"configs.cleanup_collection"
] |
[((419, 474), 'logging.info', 'logging.info', (['"""entrando na funcao get_user_locale_info"""'], {}), "('entrando na funcao get_user_locale_info')\n", (431, 474), False, 'import logging\n'), ((1119, 1167), 'logging.info', 'logging.info', (['"""entrando na funcao get_user_info"""'], {}), "('entrando na funcao get_user_info')\n", (1131, 1167), False, 'import logging\n'), ((2008, 2065), 'configs.cleanup_collection', 'configs.cleanup_collection', (['db_connection', 'mongo_col_user'], {}), '(db_connection, mongo_col_user)\n', (2034, 2065), False, 'import configs\n')]
|
# coding=utf-8
import http.client
import hashlib
import urllib
import random
import json
class BaiduTranslate:
    appid = '' # fill in your appid
    secretKey = '' # fill in your secret key
httpClient = None
def __init__(self, appid, secretKey):
self.appid = appid
self.secretKey = secretKey
def translate(self,q):
myurl = '/api/trans/vip/translate'
        fromLang = 'auto' # source language
        toLang = 'zh' # target language
salt = random.randint(32768, 65536)
q = q
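        # Baidu translate signs each request with MD5(appid + query + salt + secretKey)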
sign = self.appid + q + str(salt) + self.secretKey
sign = hashlib.md5(sign.encode()).hexdigest()
myurl = myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(
salt) + '&sign=' + sign
try:
httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
httpClient.request('GET', myurl)
            # response is an HTTPResponse object
response = httpClient.getresponse()
result_all = response.read().decode("utf-8")
result = json.loads(result_all)
print (result)
return result['trans_result'][0]['dst']
except Exception as e:
print (e)
finally:
if httpClient:
httpClient.close()
|
[
"urllib.parse.quote",
"random.randint",
"json.loads"
] |
[((450, 478), 'random.randint', 'random.randint', (['(32768)', '(65536)'], {}), '(32768, 65536)\n', (464, 478), False, 'import random\n'), ((1074, 1096), 'json.loads', 'json.loads', (['result_all'], {}), '(result_all)\n', (1084, 1096), False, 'import json\n'), ((663, 684), 'urllib.parse.quote', 'urllib.parse.quote', (['q'], {}), '(q)\n', (681, 684), False, 'import urllib\n')]
|
import requests
from dataset import Dataset
from exceptions import (
InsufficientMetadataError, MethodNotAllowedError, OperationFailedError,
ConnectionError
)
from utils import get_element, get_elements, sanitize
class Dataverse(object):
def __init__(self, connection, collection):
self.connection = connection
self.collection = collection
self._contents_json = None
@property
def is_published(self):
collection_info = requests.get(
self.collection.get('href'),
auth=self.connection.auth,
).content
status_tag = get_element(
collection_info,
namespace="http://purl.org/net/sword/terms/state",
tag="dataverseHasBeenReleased",
)
status = status_tag.text
return status.lower() == 'true'
@property
def alias(self):
return self.collection.get('href').split('/')[-1]
@property
def title(self):
return sanitize(get_element(
self.collection,
namespace='atom',
tag='title',
).text)
def get_contents(self, refresh=False):
if not refresh and self._contents_json:
return self._contents_json
content_uri = 'https://{0}/api/dataverses/{1}/contents'.format(
self.connection.host, self.alias
)
resp = requests.get(
content_uri,
params={'key': self.connection.token}
)
if resp.status_code != 200:
raise ConnectionError('Atom entry could not be retrieved.')
self._contents_json = resp.json()['data']
return self._contents_json
def publish(self):
edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format(
self.connection.host, self.alias
)
resp = requests.post(
edit_uri,
headers={'In-Progress': 'false'},
auth=self.connection.auth,
)
if resp.status_code != 200:
raise OperationFailedError('The Dataverse could not be published.')
def add_dataset(self, dataset):
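        # SWORD deposit needs Dublin Core title, description and creator, so fail
        # fast with a clear error before contacting the server.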
if get_element(dataset._entry, 'title', 'dcterms') is None:
raise InsufficientMetadataError('This dataset must have a title.')
if get_element(dataset._entry, 'description', 'dcterms') is None:
raise InsufficientMetadataError('This dataset must have a description.')
if get_element(dataset._entry, 'creator', 'dcterms') is None:
raise InsufficientMetadataError('This dataset must have an author.')
resp = requests.post(
self.collection.get('href'),
data=dataset.get_entry(),
headers={'Content-type': 'application/atom+xml'},
auth=self.connection.auth,
)
if resp.status_code != 201:
raise OperationFailedError('This dataset could not be added.')
dataset.dataverse = self
dataset._refresh(receipt=resp.content)
def delete_dataset(self, dataset):
if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED':
return
resp = requests.delete(
dataset.edit_uri,
auth=self.connection.auth,
)
if resp.status_code == 405:
raise MethodNotAllowedError('Published datasets can only be '
'deleted from the GUI. For more information, please refer to '
'https://github.com/IQSS/dataverse/issues/778')
dataset._state = 'DEACCESSIONED'
def get_datasets(self):
collection_info = requests.get(
self.collection.get('href'),
auth=self.connection.auth,
).content
entries = get_elements(collection_info, tag='entry')
return [Dataset.from_dataverse(entry, self) for entry in entries]
def get_dataset_by_doi(self, doi):
return next((s for s in self.get_datasets() if s.doi == doi), None)
def get_dataset_by_title(self, title):
return next((s for s in self.get_datasets() if s.title == title), None)
def get_dataset_by_string_in_entry(self, string):
return next((s for s in self.get_datasets() if string in s.get_entry()), None)
|
[
"exceptions.MethodNotAllowedError",
"exceptions.InsufficientMetadataError",
"utils.get_elements",
"requests.delete",
"dataset.Dataset.from_dataverse",
"requests.get",
"requests.post",
"exceptions.ConnectionError",
"exceptions.OperationFailedError",
"utils.get_element"
] |
[((610, 726), 'utils.get_element', 'get_element', (['collection_info'], {'namespace': '"""http://purl.org/net/sword/terms/state"""', 'tag': '"""dataverseHasBeenReleased"""'}), "(collection_info, namespace=\n 'http://purl.org/net/sword/terms/state', tag='dataverseHasBeenReleased')\n", (621, 726), False, 'from utils import get_element, get_elements, sanitize\n'), ((1384, 1448), 'requests.get', 'requests.get', (['content_uri'], {'params': "{'key': self.connection.token}"}), "(content_uri, params={'key': self.connection.token})\n", (1396, 1448), False, 'import requests\n'), ((1866, 1955), 'requests.post', 'requests.post', (['edit_uri'], {'headers': "{'In-Progress': 'false'}", 'auth': 'self.connection.auth'}), "(edit_uri, headers={'In-Progress': 'false'}, auth=self.\n connection.auth)\n", (1879, 1955), False, 'import requests\n'), ((3183, 3243), 'requests.delete', 'requests.delete', (['dataset.edit_uri'], {'auth': 'self.connection.auth'}), '(dataset.edit_uri, auth=self.connection.auth)\n', (3198, 3243), False, 'import requests\n'), ((3769, 3811), 'utils.get_elements', 'get_elements', (['collection_info'], {'tag': '"""entry"""'}), "(collection_info, tag='entry')\n", (3781, 3811), False, 'from utils import get_element, get_elements, sanitize\n'), ((1538, 1591), 'exceptions.ConnectionError', 'ConnectionError', (['"""Atom entry could not be retrieved."""'], {}), "('Atom entry could not be retrieved.')\n", (1553, 1591), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((2053, 2114), 'exceptions.OperationFailedError', 'OperationFailedError', (['"""The Dataverse could not be published."""'], {}), "('The Dataverse could not be published.')\n", (2073, 2114), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((2163, 2210), 'utils.get_element', 'get_element', (['dataset._entry', '"""title"""', '"""dcterms"""'], {}), "(dataset._entry, 'title', 'dcterms')\n", (2174, 2210), False, 'from utils import get_element, get_elements, sanitize\n'), ((2238, 2298), 'exceptions.InsufficientMetadataError', 'InsufficientMetadataError', (['"""This dataset must have a title."""'], {}), "('This dataset must have a title.')\n", (2263, 2298), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((2310, 2363), 'utils.get_element', 'get_element', (['dataset._entry', '"""description"""', '"""dcterms"""'], {}), "(dataset._entry, 'description', 'dcterms')\n", (2321, 2363), False, 'from utils import get_element, get_elements, sanitize\n'), ((2391, 2457), 'exceptions.InsufficientMetadataError', 'InsufficientMetadataError', (['"""This dataset must have a description."""'], {}), "('This dataset must have a description.')\n", (2416, 2457), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((2469, 2518), 'utils.get_element', 'get_element', (['dataset._entry', '"""creator"""', '"""dcterms"""'], {}), "(dataset._entry, 'creator', 'dcterms')\n", (2480, 2518), False, 'from utils import get_element, get_elements, sanitize\n'), ((2546, 2608), 'exceptions.InsufficientMetadataError', 'InsufficientMetadataError', (['"""This dataset must have an author."""'], {}), "('This dataset must have an author.')\n", (2571, 2608), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((2885, 2941), 
'exceptions.OperationFailedError', 'OperationFailedError', (['"""This dataset could not be added."""'], {}), "('This dataset could not be added.')\n", (2905, 2941), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((3333, 3503), 'exceptions.MethodNotAllowedError', 'MethodNotAllowedError', (['"""Published datasets can only be deleted from the GUI. For more information, please refer to https://github.com/IQSS/dataverse/issues/778"""'], {}), "(\n 'Published datasets can only be deleted from the GUI. For more information, please refer to https://github.com/IQSS/dataverse/issues/778'\n )\n", (3354, 3503), False, 'from exceptions import InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError\n'), ((3828, 3863), 'dataset.Dataset.from_dataverse', 'Dataset.from_dataverse', (['entry', 'self'], {}), '(entry, self)\n', (3850, 3863), False, 'from dataset import Dataset\n'), ((997, 1056), 'utils.get_element', 'get_element', (['self.collection'], {'namespace': '"""atom"""', 'tag': '"""title"""'}), "(self.collection, namespace='atom', tag='title')\n", (1008, 1056), False, 'from utils import get_element, get_elements, sanitize\n')]
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2018–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from mergify_engine import config
from mergify_engine import context
from mergify_engine.tests.functional import base
class TestUpdateAction(base.FunctionalTestBase):
async def test_update_action(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
async def test_update_action_on_closed_pr_deleted_branch(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}, "delete_head_branch": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
# Now merge p2 so p1 is not up to date
await self.add_label(p2["number"], "merge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p1, [])
checks = await ctxt.pull_engine_check_runs
for check in checks:
assert check["conclusion"] == "success", check
|
[
"mergify_engine.context.Context.create",
"yaml.dump"
] |
[((3562, 3614), 'mergify_engine.context.Context.create', 'context.Context.create', (['self.repository_ctxt', 'p1', '[]'], {}), '(self.repository_ctxt, p1, [])\n', (3584, 3614), False, 'from mergify_engine import context\n'), ((1337, 1353), 'yaml.dump', 'yaml.dump', (['rules'], {}), '(rules)\n', (1346, 1353), False, 'import yaml\n'), ((2706, 2722), 'yaml.dump', 'yaml.dump', (['rules'], {}), '(rules)\n', (2715, 2722), False, 'import yaml\n')]
|
import msal
import logging
import requests
import json
config = {
"authority": "https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3",
"client_id": "d584a43a-c4c1-4fbe-9c1c-3cae87420e6e",
"scope": [ "https://graph.microsoft.com/.default" ],
"secret": "<KEY>",
"endpoint": "https://graph.microsoft.com/v1.0/users"
}
# Create a preferably long-lived app instance that maintains a token cache.
app = msal.ConfidentialClientApplication(
config["client_id"], authority=config["authority"],
client_credential=config["secret"]
)
# The pattern to acquire a token looks like this.
result = None
# First, the code looks up a token from the cache.
# Because we're looking for a token for the current app, not for a user,
# use None for the account parameter.
result = app.acquire_token_silent(config["scope"], account=None)
if not result:
logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
result = app.acquire_token_for_client(scopes=config["scope"])
if "access_token" in result:
# Call a protected API with the access token.
endpoint_root = 'https://graph.microsoft.com/v1.0'
http_headers = {
'Authorization' : 'Bearer ' + result['access_token'],
'Accept' : 'application/json',
'Content-Type' : 'application/json'
}
# Look for our site
siteName='MFPersonal'
endpoint = '{}/sites?search={}'.format(endpoint_root, siteName)
siteq = requests.get(endpoint, headers=http_headers, stream=False).json()
# We may not have a site
try:
our_site = None
for a_site in siteq['value']:
if a_site['name'] == siteName:
our_site = a_site
break
list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List')
the_list = requests.get(list_ep, headers=http_headers, stream=False).json()
listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id'])
the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json()
an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id'])
an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json()
new_item = {
'fields': {
'Title' : 'Another item',
'testfield' : 'another test field'
}
}
payload = json.dumps(new_item)
new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id'])
make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json()
    except Exception as e:
print(str(e))
print(result["token_type"])
else:
print(result.get("error"))
print(result.get("error_description"))
print(result.get("correlation_id")) # You might need this when reporting a bug.
|
[
"json.dumps",
"logging.info",
"requests.get",
"requests.post",
"msal.ConfidentialClientApplication"
] |
[((439, 566), 'msal.ConfidentialClientApplication', 'msal.ConfidentialClientApplication', (["config['client_id']"], {'authority': "config['authority']", 'client_credential': "config['secret']"}), "(config['client_id'], authority=config[\n 'authority'], client_credential=config['secret'])\n", (473, 566), False, 'import msal\n'), ((889, 974), 'logging.info', 'logging.info', (['"""No suitable token exists in cache. Let\'s get a new one from AAD."""'], {}), '("No suitable token exists in cache. Let\'s get a new one from AAD."\n )\n', (901, 974), False, 'import logging\n'), ((2496, 2516), 'json.dumps', 'json.dumps', (['new_item'], {}), '(new_item)\n', (2506, 2516), False, 'import json\n'), ((1481, 1539), 'requests.get', 'requests.get', (['endpoint'], {'headers': 'http_headers', 'stream': '(False)'}), '(endpoint, headers=http_headers, stream=False)\n', (1493, 1539), False, 'import requests\n'), ((1864, 1921), 'requests.get', 'requests.get', (['list_ep'], {'headers': 'http_headers', 'stream': '(False)'}), '(list_ep, headers=http_headers, stream=False)\n', (1876, 1921), False, 'import requests\n'), ((2055, 2116), 'requests.get', 'requests.get', (['listitem_ep'], {'headers': 'http_headers', 'stream': '(False)'}), '(listitem_ep, headers=http_headers, stream=False)\n', (2067, 2116), False, 'import requests\n'), ((2248, 2308), 'requests.get', 'requests.get', (['an_item_ep'], {'headers': 'http_headers', 'stream': '(False)'}), '(an_item_ep, headers=http_headers, stream=False)\n', (2260, 2308), False, 'import requests\n'), ((2646, 2722), 'requests.post', 'requests.post', (['new_item_ep'], {'headers': 'http_headers', 'data': 'payload', 'stream': '(False)'}), '(new_item_ep, headers=http_headers, data=payload, stream=False)\n', (2659, 2722), False, 'import requests\n')]
|
# Copyright (c) 2014-2021, Dr <NAME>, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the Vector3D object.
"""
import unittest
import numpy as np
# from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra
from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra
class TestTetrahedra(unittest.TestCase):
def test_inside_tetrahedra(self):
"""Tests the inside tetrahedra algorithm."""
# defining triangle vertices
v1x, v1y, v1z = 0, 0, 0
v2x, v2y, v2z = 1, 0, 0
v3x, v3y, v3z = 0, 1, 0
v4x, v4y, v4z = 0, 0, 1
# test vertices are inside
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z))
# check line segments are inside
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5))
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0))
# check an interior point
self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25))
# check an exterior point
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0))
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra"
] |
[((4500, 4515), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4513, 4515), False, 'import unittest\n'), ((2241, 2337), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', 'v1x', 'v1y', 'v1z'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, v1x, v1y, v1z)\n', (2258, 2337), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((2359, 2455), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', 'v2x', 'v2y', 'v2z'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, v2x, v2y, v2z)\n', (2376, 2455), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((2477, 2573), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', 'v3x', 'v3y', 'v3z'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, v3x, v3y, v3z)\n', (2494, 2573), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((2595, 2691), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', 'v4x', 'v4y', 'v4z'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, v4x, v4y, v4z)\n', (2612, 2691), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((2755, 2847), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.5)', '(0)', '(0)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.5, 0, 0)\n', (2772, 2847), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((2869, 2961), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0)', '(0.5)', '(0)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0, 0.5, 0)\n', (2886, 2961), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((2983, 3075), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0)', '(0)', '(0.5)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0, 0, 0.5)\n', (3000, 3075), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3097, 3191), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.5)', '(0)', '(0.5)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.5, 0, 0.5)\n', (3114, 3191), True, 'from raysect.core.math.cython.tetrahedra import 
_test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3213, 3307), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0)', '(0.5)', '(0.5)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0, 0.5, 0.5)\n', (3230, 3307), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3329, 3423), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.5)', '(0.5)', '(0)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.5, 0.5, 0)\n', (3346, 3423), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3480, 3579), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.25)', '(0.25)', '(0.25)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.25, 0.25, 0.25)\n', (3497, 3579), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3637, 3736), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(-0.5)', '(-0.5)', '(-0.5)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, -0.5, -0.5, -0.5)\n', (3654, 3736), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3759, 3857), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.5)', '(-0.01)', '(0.5)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.5, -0.01, 0.5)\n', (3776, 3857), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((3880, 3978), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(-0.01)', '(0.5)', '(0.5)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, -0.01, 0.5, 0.5)\n', (3897, 3978), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((4001, 4101), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.25)', '(0.25)', '(-0.01)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.25, 0.25, -0.01)\n', (4018, 4101), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((4124, 4223), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.5)', '(0.5)', '(0.0001)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.5, 0.5, 0.0001)\n', (4141, 4223), True, 'from 
raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((4246, 4342), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(1.0)', '(1.0)', '(1.0)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 1.0, 1.0, 1.0)\n', (4263, 4342), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n'), ((4365, 4469), 'raysect.core.math.cython.tetrahedra._test_barycentric_inside_tetrahedra', 'inside_tetrahedra', (['v1x', 'v1y', 'v1z', 'v2x', 'v2y', 'v2z', 'v3x', 'v3y', 'v3z', 'v4x', 'v4y', 'v4z', '(0.3333)', '(0.3333)', '(0.335)'], {}), '(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,\n v4z, 0.3333, 0.3333, 0.335)\n', (4382, 4469), True, 'from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra\n')]
|
from django.db import models
from django.contrib.auth.models import User
from blog.models import Post
class Comment(models.Model):
STATUS_ITEMS = (
(1,'正常'),
(2,'删除'),
)
target = models.CharField(max_length=200, null=True, verbose_name='评论目标')
content = models.CharField(max_length=2000, verbose_name='内容')
nickname = models.CharField(max_length=50, verbose_name='别名')
status = models.PositiveIntegerField(default=1, choices=STATUS_ITEMS, verbose_name='状态')
websit = models.URLField(verbose_name='网址')
email = models.EmailField(verbose_name='邮箱')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
class Meta():
verbose_name = verbose_name_plural = '评论'
def __str__(self):
return '{}'.format(self.target)
def nickname_show(self):
return '来自{}的评论'.format(self.nickname)
nickname_show.short_description = '评论者'
|
[
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.EmailField",
"django.db.models.DateTimeField"
] |
[((211, 275), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)', 'verbose_name': '"""评论目标"""'}), "(max_length=200, null=True, verbose_name='评论目标')\n", (227, 275), False, 'from django.db import models\n'), ((290, 342), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)', 'verbose_name': '"""内容"""'}), "(max_length=2000, verbose_name='内容')\n", (306, 342), False, 'from django.db import models\n'), ((358, 408), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""别名"""'}), "(max_length=50, verbose_name='别名')\n", (374, 408), False, 'from django.db import models\n'), ((422, 501), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)', 'choices': 'STATUS_ITEMS', 'verbose_name': '"""状态"""'}), "(default=1, choices=STATUS_ITEMS, verbose_name='状态')\n", (449, 501), False, 'from django.db import models\n'), ((515, 549), 'django.db.models.URLField', 'models.URLField', ([], {'verbose_name': '"""网址"""'}), "(verbose_name='网址')\n", (530, 549), False, 'from django.db import models\n'), ((562, 598), 'django.db.models.EmailField', 'models.EmailField', ([], {'verbose_name': '"""邮箱"""'}), "(verbose_name='邮箱')\n", (579, 598), False, 'from django.db import models\n'), ((618, 678), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""创建时间"""'}), "(auto_now_add=True, verbose_name='创建时间')\n", (638, 678), False, 'from django.db import models\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from mmdet import _Custom as _C
from apex import amp
class _RROIAlign(Function):
@staticmethod
def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
out_h, out_w = _pair(out_size)
assert isinstance(out_h, int) and isinstance(out_w, int)
ctx.spatial_scale = spatial_scale
ctx.sample_num = sample_num
ctx.save_for_backward(rois)
ctx.feature_size = features.size()
output = _C.rotate_roi_align_forward(
features, rois, spatial_scale, out_h, out_w, sample_num
)
# return output, argmax # DEBUG ONLY
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
feature_size = ctx.feature_size
spatial_scale = ctx.spatial_scale
sample_num = ctx.sample_num
rois = ctx.saved_tensors[0]
assert (feature_size is not None and grad_output.is_cuda)
batch_size, num_channels, data_height, data_width = feature_size
out_w = grad_output.size(3)
out_h = grad_output.size(2)
grad_input = grad_rois = None
if ctx.needs_input_grad[0]:
grad_input = _C.rotate_roi_align_backward(
grad_output.contiguous(),
rois,
spatial_scale,
out_h,
out_w,
batch_size,
num_channels,
data_height,
data_width,
sample_num
)
return grad_input, grad_rois, None, None, None
rroi_align = _RROIAlign.apply
class RROIAlign(nn.Module):
def __init__(self, out_size, spatial_scale, sample_num=0):
super(RROIAlign, self).__init__()
self.out_size = out_size
self.spatial_scale = spatial_scale
self.sample_num = sample_num
@amp.float_function
def forward(self, features, rois):
return rroi_align(
features, rois, self.out_size, self.spatial_scale, self.sample_num
)
def __repr__(self):
format_str = self.__class__.__name__
        format_str += '(out_size={}, spatial_scale={}, sample_num={})'.format(
self.out_size, self.spatial_scale, self.sample_num)
return format_str
|
[
"mmdet._Custom.rotate_roi_align_forward",
"torch.nn.modules.utils._pair"
] |
[((455, 470), 'torch.nn.modules.utils._pair', '_pair', (['out_size'], {}), '(out_size)\n', (460, 470), False, 'from torch.nn.modules.utils import _pair\n'), ((716, 804), 'mmdet._Custom.rotate_roi_align_forward', '_C.rotate_roi_align_forward', (['features', 'rois', 'spatial_scale', 'out_h', 'out_w', 'sample_num'], {}), '(features, rois, spatial_scale, out_h, out_w,\n sample_num)\n', (743, 804), True, 'from mmdet import _Custom as _C\n')]
|
from torch import nn
from attrdict import AttrDict
import torch
import torch.nn.functional as F
from lie_conv.dynamicsTrainer import Partial
from torchdiffeq import odeint
from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH
class DynamicsPredictor(nn.Module):
"""This class implements forward pass through our model, including loss computation."""
def __init__(self, predictor, debug=False, task="spring", model_with_dict=True):
super().__init__()
self.predictor = predictor
self.debug = debug
self.task = task
self.model_with_dict = model_with_dict
if self.debug:
print("DynamicsPredictor is in DEBUG MODE.")
def _rollout_model(self, z0, ts, sys_params, tol=1e-4):
"""inputs [z0: (bs, z_dim), ts: (bs, T), sys_params: (bs, n, c)]
outputs pred_zs: (bs, T, z_dim)"""
dynamics = Partial(self.predictor, sysP=sys_params)
zs = odeint(dynamics, z0, ts[0], rtol=tol, method="rk4")
return zs.permute(1, 0, 2)
def forward(self, data):
o = AttrDict()
(z0, sys_params, ts), true_zs = data
pred_zs = self._rollout_model(z0, ts, sys_params)
mse = (pred_zs - true_zs).pow(2).mean()
if self.debug:
if self.task == "spring":
# currently a bit inefficient to do the below?
with torch.no_grad():
(z0, sys_params, ts), true_zs = data
z = z0
m = sys_params[..., 0] # assume the first component encodes masses
D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim
q = z[:, : D // 2].reshape(*m.shape, -1)
p = z[:, D // 2 :].reshape(*m.shape, -1)
V_pred = self.predictor.compute_V((q, sys_params))
k = sys_params[..., 1]
V_true = SpringV(q, k)
mse_V = (V_pred - V_true).pow(2).mean()
# dynamics
dyn_tz_pred = self.predictor(ts, z0, sys_params)
H = lambda t, z: SpringH(
z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1)
)
dynamics = HamiltonianDynamics(H, wgrad=False)
dyn_tz_true = dynamics(ts, z0)
mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean()
o.mse_dyn = mse_dyn
o.mse_V = mse_V
o.prediction = pred_zs
o.mse = mse
o.loss = mse # loss wrt which we train the model
if self.debug:
o.reports = AttrDict({"mse": o.mse, "mse_V": o.mse_V, "mse_dyn": o.mse_dyn})
else:
o.reports = AttrDict({"mse": o.mse})
if not self.model_with_dict:
return pred_zs
return o
|
[
"lie_conv.dynamicsTrainer.Partial",
"lie_conv.hamiltonian.SpringV",
"torchdiffeq.odeint",
"torch.no_grad",
"lie_conv.hamiltonian.HamiltonianDynamics",
"attrdict.AttrDict"
] |
[((917, 957), 'lie_conv.dynamicsTrainer.Partial', 'Partial', (['self.predictor'], {'sysP': 'sys_params'}), '(self.predictor, sysP=sys_params)\n', (924, 957), False, 'from lie_conv.dynamicsTrainer import Partial\n'), ((971, 1022), 'torchdiffeq.odeint', 'odeint', (['dynamics', 'z0', 'ts[0]'], {'rtol': 'tol', 'method': '"""rk4"""'}), "(dynamics, z0, ts[0], rtol=tol, method='rk4')\n", (977, 1022), False, 'from torchdiffeq import odeint\n'), ((1100, 1110), 'attrdict.AttrDict', 'AttrDict', ([], {}), '()\n', (1108, 1110), False, 'from attrdict import AttrDict\n'), ((2689, 2753), 'attrdict.AttrDict', 'AttrDict', (["{'mse': o.mse, 'mse_V': o.mse_V, 'mse_dyn': o.mse_dyn}"], {}), "({'mse': o.mse, 'mse_V': o.mse_V, 'mse_dyn': o.mse_dyn})\n", (2697, 2753), False, 'from attrdict import AttrDict\n'), ((2792, 2816), 'attrdict.AttrDict', 'AttrDict', (["{'mse': o.mse}"], {}), "({'mse': o.mse})\n", (2800, 2816), False, 'from attrdict import AttrDict\n'), ((1410, 1425), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1423, 1425), False, 'import torch\n'), ((1944, 1957), 'lie_conv.hamiltonian.SpringV', 'SpringV', (['q', 'k'], {}), '(q, k)\n', (1951, 1957), False, 'from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH\n'), ((2310, 2345), 'lie_conv.hamiltonian.HamiltonianDynamics', 'HamiltonianDynamics', (['H'], {'wgrad': '(False)'}), '(H, wgrad=False)\n', (2329, 2345), False, 'from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH\n')]
|
import numpy as np
from utils import pick_discrete
class PseudoMarginalData(object):
def __init__(self, data, interim_prior):
# Data should have dims [NOBJ, NSAMPLE, NDIM] or [NOBJ, NSAMPLE] if NDIM is 1
# interim_prior should have dims [NOBJ, NSAMPLE]
self.data = data
self.interim_prior = interim_prior
if self.data.ndim == 2:
self.nobj, self.nsample = self.data.shape
else:
self.nobj, self.nsample, self.ndim = self.data.shape
if self.interim_prior.shape != (self.nobj, self.nsample):
ds = self.data.shape
ips = self.interim_prior.shape
raise ValueError(("data shape [NOBJ, NSAMPLE, NDIM] = [{}, {}, {}]" +
" inconsistent with interim_prior shape [NOBJ, NSAMPLE] = [{}, {}]")
                             .format(ds[0], ds[1], ds[2], ips[0], ips[1]))
def __len__(self):
return self.nobj
def __getitem__(self, index):
import numbers
cls = type(self)
# *Leave* a shallow axis in the case a single object is requested.
if isinstance(index, numbers.Integral):
return cls(self.data[np.newaxis, index], self.interim_prior[np.newaxis, index])
else:
return cls(self.data[index], self.interim_prior[index])
def random_sample(self):
"""Return a [NOBJ, NDIM] numpy array sampling over NSAMPLE using inverse interim_prior
weights. Needed to compute a posterior object."""
ps = 1./self.interim_prior
ps /= np.sum(ps, axis=1)[:, np.newaxis]
return np.array([self.data[i, pick_discrete(p)] for i, p in enumerate(ps)])
class NullManip(object):
def init(self, D):
pass
def __call__(self, D):
return D
def unmanip(self, D):
return D
def update(self, D, phi, c, prior):
pass
|
[
"numpy.sum",
"utils.pick_discrete"
] |
[((1570, 1588), 'numpy.sum', 'np.sum', (['ps'], {'axis': '(1)'}), '(ps, axis=1)\n', (1576, 1588), True, 'import numpy as np\n'), ((1642, 1658), 'utils.pick_discrete', 'pick_discrete', (['p'], {}), '(p)\n', (1655, 1658), False, 'from utils import pick_discrete\n')]
|
#!/usr/bin/python3
"""
This module contains classes for parsing the imgur.com site. It consists of three
classes:
~ ImgurException
~ ImgurFileFormats
~ Imgur
Imgur is the main class and it obtains list of direct image urls that could be
used to download images. Example usage:
```python3
imgur = Imgur('http://imgur.com/gallery/vTTHZ')
imgur.prepare_images()
images = imgur.images
```
imgur.images is a deque of two keyed dictionaries. Example usage:
```python3
for image in images:
print(image['url'], image['filename'])
```
If images need to be downloaded in order they appear in an album, their filenames
have to be numerated. Full examples:
```python3
imgur = Imgur('http://imgur.com/gallery/vTTHZ')
imgur.prepare_images()
imgur.images # These are not guaranteed to appear in order when downloaded
imgur.numerate_images()
images = imgur.images
```
Note: For up to date version of this class visit:
https://github.com/petarGitNik/imgur-downloader
"""
import re
from collections import deque
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
__version__ = 'v0.2'
__status__ = 'Development'
class ImgurException(Exception):
"""
This exception is raised if supplied link is invalid.
"""
pass
class ImgurFileFormats(object):
"""
Contains extensions for file formats that are allowed on imgur. Source:
https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload-
Archived:
http://archive.is/89Uky
https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload-
"""
JPG = '.jpg'
JPEG = '.jpeg'
PNG = '.png'
GIF = '.gif'
APNG = '.apng'
TIFF = '.tiff'
PDF = '.pdf'
XCF = '.xcf'
#WEBM = '.webm'
#MP4 = '.mp4'
@classmethod
def formats(cls):
"""
Return a set consisting of all class attributes. Class attributes must
not be callable.
"""
formats = set()
for attribute in ImgurFileFormats.__dict__.keys():
if attribute[:2] != '__':
value = getattr(ImgurFileFormats, attribute)
if not callable(value):
formats.add(value)
return formats
class Imgur(object):
"""
Imgur contains all necessary methods to extract image or album images from
imgur link.
"""
def __init__(self, url):
"""
Initiate Imgur object.
"""
self.url = self.sanitize(url)
self.images = deque()
def sanitize(self, url):
"""
Check if the supplied link is valid. If not, raise ImgurException. This
method checks only if the domain is valid.
"""
if re.match('https?\:\/\/(i\.)?imgur\.com\/', url):
if self.is_it_gifv(url):
return self.sanitize_gifv(url)
return url
raise ImgurException('Invalid link.')
def sanitize_gifv(self, url):
"""
Remove 'v' from .gifv
"""
pattern = 'https?\:\/\/i\.imgur\.com\/[a-zA-Z0-9]+\.gif'
return re.match(pattern, url).group(0)
def is_it_gifv(self, url):
"""
Check if the supplied link points to .gifv page.
"""
if '.gifv' in url:
return True
return False
def is_it_image(self):
"""
Check if the url points to image. Examples:
http(s)://i.imgur.com/[image_hash].[extension]
http(s)://i.imgur.com/[image_hash]
http(s)://imgur.com/[image_hash]
"""
# https*\:\/\/(i\.)?imgur\.com\/[a-zA-Z0-9]*(\.[a-zA-Z]{1,4})?
return not self.is_it_album()
def is_it_album(self):
"""
Check if the url points to an album. Examples:
http(s)://imgur.com/a/[album_hash]
http(s)://imgur.com/gallery/[album_hash]
"""
return ('/a/' in self.url) or ('/gallery/' in self.url)
def is_it_grid(self):
"""
Check if the url points to a grid view. Example:
http(s)://imgur.com/a/[album_hash]?grid
"""
return self.url.endswith('?grid')
def change_gallery(self):
"""
Change /gallery/ to /a/ in url.
"""
return self.url.replace('/gallery/', '/a/')
def turn_into_grid(self):
"""
Append ?grid to url.
"""
if self.is_it_album():
if not self.is_it_grid():
return ''.join([self.change_gallery(), '?grid'])
else:
return self.url
raise ImgurException('Cannot convert single image into album grid.')
def prepare_images(self):
"""
Parses HTML from the provided url to obtain link(s) to image(s). Raises
exception if the link already ends with an extension.
"""
if self.is_it_image():
if self.contains_extension(self.url):
self.images.append(
self.pack_image(self.url, self.get_image_filename(self.url))
)
return
else:
self.parse_and_prepare_images(self.url)
return
grid = self.turn_into_grid()
self.parse_and_prepare_images(grid)
return
def parse_and_prepare_images(self, url):
"""
Obtain and parse html, and append image dictionaries to image deque.
"""
pattern = '\{"hash":"([a-zA-Z0-9]+)".*?"ext":"([\.a-zA-Z0-9\?\#]+)".*?\}'
try:
html = urlopen(url).read().decode('utf-8')
filenames_with_duplicates = re.findall(pattern, html)
filenames_clean = self.remove_duplicates(filenames_with_duplicates)
urls = self.build_image_url_list(filenames_clean)
for url in urls:
self.images.append(
self.pack_image(url, self.get_image_filename(url))
)
except HTTPError as e:
print(e.status)
except URLError as e:
print(e.reason)
def build_image_url_list(self, filenames):
"""
Build list of direct links to images. Input filenames list is a list of
tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks
like:
['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg']
"""
urls = []
for filename, extension in filenames:
urls.append(''.join(['https://i.imgur.com/', filename, extension]))
return urls
def remove_duplicates(self, filenames):
"""
Remove duplicates from a list of tuples containing filenames with
extensions.
"""
clean = []
for filename in filenames:
if filename not in clean:
clean.append(filename)
return clean
def contains_extension(self, url):
"""
Check if the image url contains extension. If there is an extension it
is returned. Otherwise, None is returned.
"""
for extension in ImgurFileFormats.formats():
if extension in url:
return extension
return None
def get_image_filename(self, url):
"""
Get image file name from its url. Examples:
https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg
https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg
"""
candidate = url.split('/')[-1]
extension = self.contains_extension(url)
pattern = ''.join(['.+\\', extension])
return re.match(pattern, candidate).group(0)
def pack_image(self, url, filename):
"""
Returns a dictionary with image url and corresponding filename.
"""
return {'url' : url, 'filename' : filename}
def number_of_images(self):
"""
Get the number of images from the images attribute.
"""
return len(self.images)
def numerate_images(self):
"""
Append ordinal number to image filename.
"""
total = self.digits_in_a_number(len(self.images))
ordinal = '{0:0%dd}' % total
for index, image in enumerate(self.images, start=1):
image['filename'] = ''.join([
ordinal.format(index), '-', image['filename']
])
def digits_in_a_number(self, number):
"""
Return how many digits are there in a number.
"""
return len(str(number))
|
[
"re.findall",
"re.match",
"collections.deque",
"urllib.request.urlopen"
] |
[((2573, 2580), 'collections.deque', 'deque', ([], {}), '()\n', (2578, 2580), False, 'from collections import deque\n'), ((2777, 2830), 're.match', 're.match', (['"""https?\\\\:\\\\/\\\\/(i\\\\.)?imgur\\\\.com\\\\/"""', 'url'], {}), "('https?\\\\:\\\\/\\\\/(i\\\\.)?imgur\\\\.com\\\\/', url)\n", (2785, 2830), False, 'import re\n'), ((5647, 5672), 're.findall', 're.findall', (['pattern', 'html'], {}), '(pattern, html)\n', (5657, 5672), False, 'import re\n'), ((3148, 3170), 're.match', 're.match', (['pattern', 'url'], {}), '(pattern, url)\n', (3156, 3170), False, 'import re\n'), ((7610, 7638), 're.match', 're.match', (['pattern', 'candidate'], {}), '(pattern, candidate)\n', (7618, 7638), False, 'import re\n'), ((5571, 5583), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (5578, 5583), False, 'from urllib.request import urlopen\n')]
|
import pefile
import numpy as np
# import os
execs = [
"1F2EB7B090018D975E6D9B40868C94CA",
"33DE5067A433A6EC5C328067DC18EC37",
"65018CD542145A3792BA09985734C12A",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"A316D5AECA269CA865077E7FFF356E7D",
"<KEY>",
"AL65_DB05DF0498B59B42A8E493CF3C10C578",
"B07322743778B5868475DBE66EEDAC4F",
"B98hX8E8622C393D7E832D39E620EAD5D3B49",
"BVJ2D9FBF759F527AF373E34673DC3ACA462",
"DS22_A670D13D4D014169C4080328B8FEB86",
"EEE99EC8AA67B05407C01094184C33D2B5A44",
"F6655E39465C2FF5B016980D918EA028",
"F8437E44748D2C3FCF84019766F4E6DC",
"<KEY>",
"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690",
"<KEY>",
"FTTR9EA3C16194CE354C244C1B74C46CD92E",
"<KEY>",
"GFT4_7DDD3D72EAD03C7518F5D47650C8572",
"<KEY>",
"<KEY>",
"JKK8CA6FE7A1315AF5AFEAC2961460A80569",
"<KEY>",
"<KEY>",
"L11_1415EB8519D13328091CC5C76A624E3D",
"NBV_8B75BCBFF174C25A0161F30758509A44",
"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4",
"PL98_BD8B082B7711BC980252F988BB0CA936",
"POL55_A4F1ECC4D25B33395196B5D51A06790",
"QW2_4C6BDDCCA2695D6202DF38708E14FC7E",
"RTC_7F85D7F628CE62D1D8F7B39D8940472",
"SAM_B659D71AE168E774FAAF38DB30F4A84",
"TG78Z__727A6800991EEAD454E53E8AF164A99C",
"VBMM9_149B7BD7218AAB4E257D28469FDDB0D",
"VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E",
]
prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [],
"sectionVS": [], "sectionSR": [], "kernel32": [], "msvcrt": [], "shell32": [],
"user32": [], "ws232": [], "ADVAPI32": [], "GDI32": [], "KERNEL32": [],
"NETAPI32": [], "PSAPI": [], "WININET": [], "ntdll": [], "TimeStamp": None}
# pe = pefile.PE("65018CD542145A3792BA09985734C12A")
# algo = [10, 20, 30, 40, 50]
granPrueba = []
entrysList = []
for a in execs:
sectionNames = []
sectionVA = []
sectionVS = []
sectionSR = []
kernel32 = []
msvcrt = []
shell32 = []
user32 = []
ws232 = []
ADVAPI32 = []
GDI32 = []
KERNEL32 = []
NETAPI32 = []
PSAPI = []
WININET = []
ntdll = []
# print(execs.index(a) + 1)
print("a")
print(a)
c = execs.index(a) + 1
pe = pefile.PE(a)
prueba["correlativo"] = c
prueba["nameExec"] = a
print(c)
print("Secciones")
for section in pe.sections:
print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData)
b = section.Name
sectionNames.append(b.decode('utf-8'))
sectionVA.append(section.VirtualAddress)
sectionVS.append(section.Misc_VirtualSize)
sectionSR.append(section.SizeOfRawData)
prueba["sectionName"] = sectionNames
prueba["sectionVA"] = sectionVA
prueba["sectionVS"] = sectionVS
prueba["sectionSR"] = sectionSR
print()
print()
print("Entradas")
for entry in pe.DIRECTORY_ENTRY_IMPORT:
print('Llamadas DLL:')
print (entry.dll)
l = entry.dll
print('Llamadas a funciones:')
entrysList.append(str(l.decode('utf-8')))
if str(entry.dll) == "b'KERNEL32.DLL'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
kernel32.append(x.decode('utf-8'))
prueba["kernel32"] = kernel32
elif str(entry.dll) == "b'ADVAPI32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
ADVAPI32.append(x.decode('utf-8'))
prueba["ADVAPI32"] = ADVAPI32
elif str(entry.dll) == "b'GDI32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
GDI32.append(x.decode('utf-8'))
prueba["GDI32"] = GDI32
elif str(entry.dll) == "b'KERNEL32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
KERNEL32.append(x.decode('utf-8'))
prueba["KERNEL32"] = KERNEL32
elif str(entry.dll) == "b'NETAPI32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
NETAPI32.append(x.decode('utf-8'))
prueba["NETAPI32"] = NETAPI32
elif str(entry.dll) == "b'PSAPI.DLL'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
PSAPI.append(x.decode('utf-8'))
prueba["PSAPI"] = PSAPI
elif str(entry.dll) == "b'WININET.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
WININET.append(x.decode('utf-8'))
prueba["WININET"] = WININET
elif str(entry.dll) == "b'ntdll.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
ntdll.append(x.decode('utf-8'))
prueba["ntdll"] = ntdll
elif str(entry.dll) == "b'MSVCRT.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
msvcrt.append(x.decode('utf-8'))
prueba["msvcrt"] = msvcrt
elif str(entry.dll) == "b'SHELL32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
shell32.append(x.decode('utf-8'))
prueba["shell32"] = shell32
elif str(entry.dll) == "b'USER32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
user32.append(x.decode('utf-8'))
prueba["user32"] = user32
elif str(entry.dll) == "b'WS2_32.dll'":
for function in entry.imports:
x = function.name
print('\t', x.decode('utf-8'))
ws232.append(x.decode('utf-8'))
prueba["ws232"] = ws232
# listamalware = os.listdir(path)
print()
print()
print("TimeStamp")
print("TimeDateStamp : " + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1])
z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]
print(z)
prueba["TimeStamp"] = z
print(c)
# print()
# print()
# print(pe.FILE_HEADER.NumberOfSections)
granPrueba.append(prueba)
prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [],
"sectionVS": [], "sectionSR": None, "kernel32": None, "msvcrt": None, "shell32": None,
"user32": None, "ws232": None, "TimeStamp": None}
# print(granPrueba)
import pandas as pd
df = pd.DataFrame(granPrueba)
print(df)
# print(entrysList)
def unique(list1):
x = np.array(list1)
print(np.unique(x))
unique(entrysList)
df.to_csv("dataset.csv")
|
[
"pandas.DataFrame",
"pefile.PE",
"numpy.array",
"numpy.unique"
] |
[((5899, 5923), 'pandas.DataFrame', 'pd.DataFrame', (['granPrueba'], {}), '(granPrueba)\n', (5911, 5923), True, 'import pandas as pd\n'), ((2017, 2029), 'pefile.PE', 'pefile.PE', (['a'], {}), '(a)\n', (2026, 2029), False, 'import pefile\n'), ((5981, 5996), 'numpy.array', 'np.array', (['list1'], {}), '(list1)\n', (5989, 5996), True, 'import numpy as np\n'), ((6004, 6016), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (6013, 6016), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
''' The main script to run, enables profiling. '''
import numpy as np
import math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from crystal import *
from plot_init_tools import *
def main(num_of_growths):
''' Main method to start simulation. Uncomment specific simulation or write a new one. '''
# Main simulation crystal, x is dimensions (m=n=x)
# Stairs
x, init = make_init("stairs", 200)
c = Crystal(x, x, initial_grid=init.copy(), mode="step", hist_int=int(num_of_growths/4), \
border_policy="loop", use_height=False)
c.grow(num_of_growths)
plot_crystal(c)
# # Step
# x, init = make_init("step", 200)
# c = Crystal(x, x, initial_grid=init.copy(), mode="step", hist_int=int(num_of_growths/4), \
# border_policy="loop")
# #c.print_grid()
# c.grow(num_of_growths)
# #c.print_grid()
# plot_crystal(c)
#
# # Screw
# x, init = make_init("screw", 200)
# c = Crystal(x, x, initial_grid=init.copy(), mode="spin", hist_int=int(num_of_growths/8), \
# border_policy="flex")
# #c.print_grid()
# c.grow(num_of_growths)
# #c.print_grid()
# plot_crystal(c)
# # A crystal object serving to visualize only "what grew" without init state
# d = Crystal(x, x, initial_grid=(c.grid-init))
# plot_crystal(d, 2)
# Show history of simulation
plot_history(c)
# # Generate a publishable plot
# plot_out(c)
def profile():
''' Function used to profile code for speedups. '''
import cProfile
cProfile.run('main(50)', 'pstats')
from pstats import Stats
p = Stats('pstats')
p.strip_dirs().sort_stats('time').print_stats(10)
main(50)
#profile()
|
[
"pstats.Stats",
"cProfile.run"
] |
[((1650, 1684), 'cProfile.run', 'cProfile.run', (['"""main(50)"""', '"""pstats"""'], {}), "('main(50)', 'pstats')\n", (1662, 1684), False, 'import cProfile\n'), ((1722, 1737), 'pstats.Stats', 'Stats', (['"""pstats"""'], {}), "('pstats')\n", (1727, 1737), False, 'from pstats import Stats\n')]
|
# -*- coding: utf-8 -*-
from os import path, getcwd, makedirs, listdir, remove
from typing import *
import pickle
from abc import ABCMeta, abstractmethod
from sgfmill.sgf import Sgf_game
import numpy as np
from .go_types import *
__all__ = ["set_cache_dir", "get_cache_dir", "get_game_dir", "get_archive_dir",
"get_array_dir", "GameData", "GameArchive",
"GameDatabase", "ArrayDatabase"]
default_cache_dir = path.join(path.dirname(path.realpath(__file__)), "../..", ".data")
cache_dir = default_cache_dir
archive_folder = path.join(cache_dir, ".kgs")
game_folder = path.join(cache_dir, ".game")
array_folder = path.join(cache_dir, ".array")
def set_cache_dir(directory: Optional[str] = None) -> NoReturn:
global cache_dir, archive_folder, game_folder, array_folder
if directory is None:
directory = default_cache_dir
cache_dir = path.join(getcwd(), directory)
archive_folder = path.join(cache_dir, ".kgs")
game_folder = path.join(cache_dir, ".game")
array_folder = path.join(cache_dir, ".array")
makedirs(get_cache_dir(), exist_ok=True)
makedirs(get_archive_dir(), exist_ok=True)
makedirs(get_game_dir(), exist_ok=True)
makedirs(get_array_dir(), exist_ok=True)
def get_cache_dir() -> str:
return cache_dir
def get_archive_dir() -> str:
return archive_folder
def get_game_dir() -> str:
return game_folder
def get_array_dir() -> str:
return array_folder
class GameData(NamedTuple):
size: int
winner: GoPlayer
sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]]
komi: float
setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]]
@classmethod
def from_sgf(cls, sgf_game: Sgf_game):
size = sgf_game.get_size()
winner = GoPlayer.to_player(sgf_game.get_winner())
sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]),
(node.get_move() for node in sgf_game.get_main_sequence())))
komi = sgf_game.get_komi()
setup_stones = sgf_game.get_root().get_setup_stones()
return cls(size, winner, sequence, komi, setup_stones)
@staticmethod
def from_pickle(name: str, size: Union[int, str] = 19):
with open(path.join(get_game_dir(), str(size), name), "rb") as f:
return pickle.load(f)
@staticmethod
def pickle_exists(name: str, size: Union[int, str] = 19):
return path.exists(path.join(get_game_dir(), str(size), name))
def to_pickle(self, name: str):
makedirs(self.root(), exist_ok=True)
dest = self.path(name)
with open(dest, "wb") as f:
pickle.dump(self, f)
def root(self):
return path.join(get_game_dir(), str(self.size))
def path(self, name: str):
return path.join(self.root(), name)
class GameArchive(metaclass=ABCMeta):
name = "none"
@classmethod
def archive_map(cls):
_dict = {_cls.name: _cls for _cls in cls.__subclasses__()}
for v in cls.__subclasses__():
_dict.update(v.archive_map())
return _dict
@abstractmethod
def retrieve(self, force=False) -> NoReturn:
"""
Retrieve all archives available from Internet.
:param force: whether forces to download archive if it has already existed
"""
pass
@abstractmethod
def extract(self, force=False) -> NoReturn:
"""
Extract all game archives to Game Cache Folder, every single file should end with `.game.pkl` and be
start with it's size of the board.
"""
pass
@abstractmethod
def unpack(self, force=False) -> NoReturn:
"""
Unpack all game archives to
:param force: whether forces to download archive if it has already existed
"""
pass
def download(self, force=False):
self.retrieve(force=force)
self.unpack(force=force)
self.extract(force=force)
class GameDatabase:
def __init__(self, size=19):
self.size = size
def __len__(self):
return len(self.keys())
def __getitem__(self, name: str) -> GameData:
return GameData.from_pickle(name, self.size)
def __setitem__(self, name: str, data: GameData):
data.to_pickle(name)
def __delitem__(self, name: str):
remove(path.join(get_game_dir(), str(self.size), name))
def __contains__(self, name: str):
return path.exists(path.join(get_game_dir(), str(self.size), name))
def __eq__(self, other):
if isinstance(other, GameDatabase):
return self.size == other.size
return NotImplemented
def root(self):
return path.join(get_game_dir(), str(self.size))
def keys(self) -> List[str]:
return listdir(self.root())
def values(self) -> Iterable[GameData]:
for key in self.keys():
yield self[key]
def items(self) -> Iterable[Tuple[str, GameData]]:
for key in self.keys():
yield key, self[key]
class ArrayDatabase:
def __init__(self, method: str, size=19):
self.size = size
self.method = method
makedirs(self.root(), exist_ok=True)
def __len__(self):
return len(self.keys())
def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]:
file = path.join(self.root(), key)
with open(file, "rb") as f:
return pickle.load(f)
def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]):
file = path.join(self.root(), key)
with open(file, "wb") as f:
pickle.dump(value, f)
def __delitem__(self, key: str):
file = path.join(self.root(), key)
remove(file)
def __contains__(self, key: str):
return path.exists(path.join(self.root(), key))
def root(self):
return path.join(get_array_dir(), str(self.size), self.method)
def keys(self) -> List[str]:
return listdir(self.root())
def values(self) -> Iterable[np.ndarray]:
for key in self.keys():
yield self[key]
def items(self) -> Iterable[Tuple[str, np.ndarray]]:
for key in self.keys():
yield key, self[key]
|
[
"os.remove",
"pickle.dump",
"os.getcwd",
"os.path.realpath",
"pickle.load",
"os.path.join"
] |
[((546, 574), 'os.path.join', 'path.join', (['cache_dir', '""".kgs"""'], {}), "(cache_dir, '.kgs')\n", (555, 574), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((589, 618), 'os.path.join', 'path.join', (['cache_dir', '""".game"""'], {}), "(cache_dir, '.game')\n", (598, 618), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((634, 664), 'os.path.join', 'path.join', (['cache_dir', '""".array"""'], {}), "(cache_dir, '.array')\n", (643, 664), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((927, 955), 'os.path.join', 'path.join', (['cache_dir', '""".kgs"""'], {}), "(cache_dir, '.kgs')\n", (936, 955), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((974, 1003), 'os.path.join', 'path.join', (['cache_dir', '""".game"""'], {}), "(cache_dir, '.game')\n", (983, 1003), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((1023, 1053), 'os.path.join', 'path.join', (['cache_dir', '""".array"""'], {}), "(cache_dir, '.array')\n", (1032, 1053), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((455, 478), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (468, 478), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((885, 893), 'os.getcwd', 'getcwd', ([], {}), '()\n', (891, 893), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((5728, 5740), 'os.remove', 'remove', (['file'], {}), '(file)\n', (5734, 5740), False, 'from os import path, getcwd, makedirs, listdir, remove\n'), ((2349, 2363), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2360, 2363), False, 'import pickle\n'), ((2677, 2697), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (2688, 2697), False, 'import pickle\n'), ((5442, 5456), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5453, 5456), False, 'import pickle\n'), ((5617, 5638), 'pickle.dump', 'pickle.dump', (['value', 'f'], {}), '(value, f)\n', (5628, 5638), False, 'import pickle\n')]
|
from collections import defaultdict
import gzip
import os
import random
import textwrap
class Heresy(Exception):
"""You have defiled the word of God!"""
pass
def bits(byte_string):
"""Generates a sequence of bits from a byte stream"""
for byte in byte_string:
for bit_num in range(8):
# Extract bit from byte
byte, bit = byte >> 1, byte % 2
yield bit
def generate_ngram_dict(filename, tuple_length):
"""Generate a dict with ngrams as key following words as value
:param filename: Filename to read from.
:param tuple_length: The length of the ngram keys
:return: Dict of the form {ngram: [next_words], ... }
"""
def file_words(file_pointer):
"""Generator for words in a file"""
for line in file_pointer:
for word in line.split():
yield word
ngrams = defaultdict(lambda: set())
with open(filename, 'r') as fp:
word_list = []
for word in file_words(fp):
if len(word_list) < tuple_length:
word_list.append(word)
continue
ngrams[tuple(word_list)].add(word)
word_list = word_list[1:] + [word]
return {key: tuple(val) for key, val in ngrams.items()}
class GodZip(object):
"""Turn unholy bits into holy words!"""
hallelujah = "Sayeth the Lord:\n\n"
amen = "\n\nAmen."
def __init__(self, tuple_length=3, line_width=70, compress=True):
self.compress = compress
self.line_width = line_width
self.tuple_length = tuple_length
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt')
self.god_grams = generate_ngram_dict(data_path, tuple_length)
self.capital_tuples = [key for key, value in self.god_grams.items()
if key[0][0].isupper()]
def praise(self, unholy_bytes):
"""Encode unholy bytes or unholy unicode into Holy text"""
if not unholy_bytes:
raise Heresy("Thou shalt not be silent in the face of the Lord!")
if not isinstance(unholy_bytes, bytes):
unholy_bytes = unholy_bytes.encode()
if self.compress:
unholy_bytes = gzip.compress(unholy_bytes)
# Start with a capitalized tuple
speech_of_god = list(random.choice(self.capital_tuples))
for bit in bits(unholy_bytes):
holy_tuple = tuple(speech_of_god[-self.tuple_length:])
holy_words = self.god_grams[holy_tuple]
# Make sure that we have some words to choose from
while len(holy_words) <= 1:
chosen_word = holy_words[0]
speech_of_god.append(chosen_word)
holy_tuple = tuple(speech_of_god[-self.tuple_length:])
holy_words = self.god_grams[holy_tuple]
# Select from even indices if bit == 0, odd if bit == 1
chosen_word = random.choice(holy_words[bit::2])
speech_of_god.append(chosen_word)
holy_sentences = ' '.join(speech_of_god).split('. ')
annotated_speech_of_god = '.\n\n'.join(
[
'\n'.join(textwrap.wrap("[{}] ".format(idx + 1) + holy_phrase, width=self.line_width))
for idx, holy_phrase in enumerate(holy_sentences)
]
)
return self.hallelujah + annotated_speech_of_god + self.amen
def reveal_from_words(self, holy_words):
"""Decode a list of holy words into unholy bytes."""
try:
holy_tuple = tuple(holy_words[:self.tuple_length])
except:
raise Heresy("You mock the word of God!")
unholy_bytes = b''
unholy_num = 0
bit_counter = 0
for holy_word in holy_words[self.tuple_length:]:
try:
holy_ngram_list = self.god_grams[holy_tuple]
except:
raise Heresy("Thou shalt not modify the word of God!")
holy_tuple = tuple(holy_tuple[1:] + (holy_word,))
if len(holy_ngram_list) <= 1:
continue
try:
unholy_bit = holy_ngram_list.index(holy_word) % 2
except:
raise Heresy("Not one word of God shall be changed!")
unholy_num |= unholy_bit << bit_counter
bit_counter += 1
if bit_counter % 8 == 0:
unholy_bytes += bytes([unholy_num])
unholy_num = 0
bit_counter = 0
if self.compress:
unholy_bytes = gzip.decompress(unholy_bytes)
return unholy_bytes
def reveal(self, annotated_speech):
"""Decode holy speech into bytes"""
split_annotated_speech = annotated_speech.split('\n\n')
# Check for hallelujah and amen
if split_annotated_speech[0] != self.hallelujah.strip() \
or split_annotated_speech[-1] != self.amen.strip():
raise Heresy("Your praise is insufficient!")
# Remove hallelujah and amen
try:
holy_annotated_sentences = split_annotated_speech[1:-1]
except:
raise Heresy("The word of God will not be silenced!")
# Remove line annotations
try:
holy_words = ' '.join([sentence.split('] ')[1]
for sentence in holy_annotated_sentences]).split()
except:
raise Heresy("How dare you imitate the word of God!")
return self.reveal_from_words(holy_words)
def hex_expand(byte_str):
return ':'.join('{:02x}'.format(byte) for byte in byte_str)
if __name__ == '__main__':
god = GodZip(compress=False)
hello_world = "Hello world!"
print("I praise unto God: %s\n\n" % hello_world)
holy_hello_world = god.praise(hello_world)
print(holy_hello_world)
assert(hello_world == god.reveal(holy_hello_world).decode())
|
[
"gzip.decompress",
"random.choice",
"os.path.abspath",
"gzip.compress"
] |
[((2267, 2294), 'gzip.compress', 'gzip.compress', (['unholy_bytes'], {}), '(unholy_bytes)\n', (2280, 2294), False, 'import gzip\n'), ((2366, 2400), 'random.choice', 'random.choice', (['self.capital_tuples'], {}), '(self.capital_tuples)\n', (2379, 2400), False, 'import random\n'), ((2981, 3014), 'random.choice', 'random.choice', (['holy_words[bit::2]'], {}), '(holy_words[bit::2])\n', (2994, 3014), False, 'import random\n'), ((4594, 4623), 'gzip.decompress', 'gzip.decompress', (['unholy_bytes'], {}), '(unholy_bytes)\n', (4609, 4623), False, 'import gzip\n'), ((1645, 1670), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1660, 1670), False, 'import os\n')]
|
import socket
import os
import pytest
import pyvista
from pyansys.misc import get_ansys_bin
import pyansys
from pyansys.errors import MapdlExitedError
pyvista.OFF_SCREEN = True
# check for a valid MAPDL install with CORBA
valid_rver = ['182', '190', '191', '192', '193', '194', '195', '201']
EXEC_FILE = None
for rver in valid_rver:
if os.path.isfile(get_ansys_bin(rver)):
EXEC_FILE = get_ansys_bin(rver)
if 'PYANSYS_IGNORE_ANSYS' in os.environ:
HAS_ANSYS = False
else:
HAS_ANSYS = EXEC_FILE is not None
skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason="Requires ANSYS installed")
modes = ['corba']
# if os.name == 'posix': # console only for linux
# modes.append('console')
collect_ignore = []
if not HAS_ANSYS:
collect_ignore.append("test_post.py")
@pytest.fixture(scope="session", params=modes)
def mapdl():
# launch in shared memory parallel for Windows VM
# configure shared memory parallel for VM
additional_switches = ''
if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB':
additional_switches = '-smp'
elif os.name == 'posix':
os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp
mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba',
additional_switches=additional_switches)
mapdl._show_matplotlib_figures = False # don't show matplotlib figures
yield mapdl
### test exit ###
# must be after yield as this uses a module scoped fixture
mapdl.exit()
assert mapdl._exited
with pytest.raises(RuntimeError):
mapdl.prep7()
assert not os.path.isfile(mapdl._lockfile)
assert 'MAPDL exited' in str(mapdl)
with pytest.raises(MapdlExitedError):
mapdl.prep7()
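# Illustrative sketch of a test that would consume the session-scoped fixture
# above; real tests live in the test_*.py modules, this is only an example.
@skip_no_ansys
def test_mapdl_session_alive(mapdl):
    # entering the preprocessor on a live session should not raise
    mapdl.prep7()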
|
[
"pyansys.launch_mapdl",
"pyansys.misc.get_ansys_bin",
"pytest.fixture",
"socket.gethostname",
"pytest.raises",
"pytest.mark.skipif",
"os.path.isfile"
] |
[((548, 616), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_ANSYS)'], {'reason': '"""Requires ANSYS installed"""'}), "(not HAS_ANSYS, reason='Requires ANSYS installed')\n", (566, 616), False, 'import pytest\n'), ((803, 848), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'params': 'modes'}), "(scope='session', params=modes)\n", (817, 848), False, 'import pytest\n'), ((1216, 1321), 'pyansys.launch_mapdl', 'pyansys.launch_mapdl', (['EXEC_FILE'], {'override': '(True)', 'mode': '"""corba"""', 'additional_switches': 'additional_switches'}), "(EXEC_FILE, override=True, mode='corba',\n additional_switches=additional_switches)\n", (1236, 1321), False, 'import pyansys\n'), ((360, 379), 'pyansys.misc.get_ansys_bin', 'get_ansys_bin', (['rver'], {}), '(rver)\n', (373, 379), False, 'from pyansys.misc import get_ansys_bin\n'), ((402, 421), 'pyansys.misc.get_ansys_bin', 'get_ansys_bin', (['rver'], {}), '(rver)\n', (415, 421), False, 'from pyansys.misc import get_ansys_bin\n'), ((1580, 1607), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1593, 1607), False, 'import pytest\n'), ((1647, 1678), 'os.path.isfile', 'os.path.isfile', (['mapdl._lockfile'], {}), '(mapdl._lockfile)\n', (1661, 1678), False, 'import os\n'), ((1729, 1760), 'pytest.raises', 'pytest.raises', (['MapdlExitedError'], {}), '(MapdlExitedError)\n', (1742, 1760), False, 'import pytest\n'), ((1019, 1039), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1037, 1039), False, 'import socket\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Description: Build structured data from the original Cranfield collection and
#              implement the BM25 algorithm for information retrieval;
# also 5 evaluation methods (precision, recall, MAP, P at N and
# NDCG at N) are applied.
# Tested under Python 3.5 on Ubuntu 16.04.
# Author: '(<NAME>.)
# Date created: 2018-05-07
# Here are some Python standard modules used in the script.
import argparse
# Used to parse program arguments.
# More details are here: https://docs.python.org/3/library/argparse.html
import readline
# Used to create a typing history buffer for `manual` mode.
# More details are here: https://docs.python.org/3/library/readline.html
import json
# Used to create a human-readable JSON file for index information and the like.
import string # Used to do some regex operations.
import math
import os
# Here are some Python libraries that places locally.
import porter
STOP_WORDS_PATH = "stopwords.txt"
DOCUMENT_PATH = "./cran/cran.all.1400"
QUERY_PATH = "./cran/cran.qry"
RELEVANCE_PATH = "./cran/cranqrel"
INDEX_PATH = "index.json"
EVALUATION_PATH = "evaluation_output.txt"
# Labels in `cran.all.1400` and `cranqrel` text files.
ID = ".I"
TITLE = ".T"
AUTHORS = ".A"
BIBLIOGRAPHY = ".B"
WORDS = ".W"
LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS]
CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS]
DELIMITER_SYMBOL = "*"
BOUNDARY_LENGTH = 80
# It decides the length of the boundary between two `manual` queries.
MOST_RELEVANT = 15
# At most top `MOST_RELEVANT` results are returned for each query.
USER_STOP_WORD = "QUIT"
# When user types `USER_STOP_WORD`, the program ends; it is case-sensitive.
RELEVANCE_SCORE_THRESHOLD = 4
# Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD`
# from `QUERY_PATH`. The default value is 4 (-1, 1, 2, 3, 4), which means all
# documents in it will be reserved.
RELEVANCE_SCORE_FIX = 5
# It is a number used as minuend to convert original relevance scores to
# NDCG-friendly ones.
# Constants used in BM25 model.
K = 1.0
B = 0.75
# A constant used in Precision at N and NDCG at N.
# If `MOST_RELEVANT` is equal to `N`, precision will be the same as P at N for Cranfield collection.
# N.B.: `N` cannot be larger than `MOST_RELEVANT`.
N = 10
def is_number(word):
""" A helper function to check if a string can be converted to an integer.
Used to process documents and queries.
"""
try:
int(word)
return True
except ValueError:
return False
def is_valid(word):
""" A helper function to check if a string is valid.
Used to process documents and queries.
"""
if word != "" and word not in stop_words and not is_number(word):
return True
else:
return False
def get_arguments():
    parser = argparse.ArgumentParser(description = "A script used to build a BM25 model and run the related evaluation methods. If the index JSON file is not available, just type `python3 bm25.py` to generate one in the working directory; extra arguments will be ignored in this case")
parser.add_argument("-m", required = False, choices = ["manual", "evaluation"], default = "manual", help = "mode selection; `manual` mode is chosen by default if it is not specified")
parser.add_argument("-o", required = False, nargs = "?", const = EVALUATION_PATH, metavar = "FILE NAME", help = "BM25 evaluation result output in lines of 3-tuples (query ID, document ID, and its rank [1 - 15]) form; if `FILE NAME` is not given, the default output file name is `evaluation_output.txt`")
return parser.parse_args()
def load_stop_words():
stop_words = set()
with open(STOP_WORDS_PATH, "r") as fp:
for line in fp:
stop_words.add(line.rstrip())
return stop_words
def process_documents():
""" Build vectors of each term and calculate lengths of each documents.
Also a dictionary containing pairs of original words and stemmed words
are returned.
"""
def add_new_word(word):
# A helper function to add a new word in `term_vectors`.
if word not in stemming:
stemming[word] = stemmer.stem(word)
stemmed_word = stemming[word]
if stemmed_word not in term_vectors:
term_vectors[stemmed_word] = {}
if document_ID in term_vectors[stemmed_word]:
(term_vectors[stemmed_word])[document_ID] += 1
else:
term_vectors[stemmed_word].update({document_ID : 1})
stemming = {}
term_vectors = {}
# `term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID : [Value] Appearance Times}}.
document_lengths = {}
average_length = 0.0
num_of_documents = 0
with open(DOCUMENT_PATH, "r") as fp:
document_ID = 0
length = 0.0
for line in fp:
current_section = line[0 : 2]
if current_section in LABELS:
if current_section == ID:
document_lengths[document_ID] = math.sqrt(length)
# Calculate the previous document length and start a new one.
# The empty entry for document 0 is also created although
# in Cranfield collection, document ID begins from 001.
average_length += document_lengths[document_ID]
document_ID += 1
                    # Ignore original document IDs, i.e. the numbers that follow ".I",
# since they may not be consecutive.
num_of_documents += 1
length = 0.0
section = current_section
continue # Update and go to next line immediately.
elif section in CONTENTS:
line = line.translate(removing_punctuation_map)
line = line.replace("--", " ")
# Also, treat two consecutive hyphens as a space.
for term in line.split():
# Split according to whitespace characters and deal with two special cases:
# abbreviations with "." and hyphenated compounds.
term = term.replace(".", "")
# Remove full stops in one term, used to convert abbreviations
# like "m.i.t." (line 1222) / "u.s.a." (line 32542) into "mit" / "usa".
# In the meantime, something like "..e.g.at" (line 17393),
# "i.e.it" (line 17287), "trans.amer.math.soc.33" (line 31509),
# or "studies.dash" (line 516) will not be handled as expected.
# All float-point numbers like "3.2x10" (line 18799), "79.5degree"
# (line 20026) will be converted into integers by just removing dots.
# And similarly, phrases like "m. i. t." (line 36527) and
# "i. e." (line 11820) will be ignored.
# "r.m.s." (line 20241) will become "rm" stored in the dictionary after stemming.
compound = term.replace("-", "")
if is_valid(compound):
add_new_word(compound)
if section == WORDS:
length += 1.0
# Treat a compound word as one word; words in `AUTHORS`
# and `BIBLIOGRAPHY` section will not be counted.
term_split = term.split("-")
if len(term_split) > 1:
# If only one item in `term_split`, which means there is no hyphen in this word.
# There may exist a term with an ending hyphens like
# "sub- and" (line 14632), which causes an extra empty string is created
# and makes term_split look like ["sub", ""].
for element in term_split:
# Deal with each part of compound words like "two-step" (line 38037) or
# type names like "75s-t6" (line 28459) or "a52b06" (line 25717).
if is_valid(element):
add_new_word(element)
# Filter out all pure integers; for example, for "f8u-3" (line 35373),
# both "f8u" and "f8u3" will be saved, but not "3".
# Calculate the last length since Cranfield collection does not have ending symbols.
document_lengths[document_ID] = math.sqrt(length)
# Skip the document with index 0 from document length vector.
del document_lengths[0]
average_length = (document_lengths[document_ID] + average_length) / num_of_documents
for document in document_lengths.keys():
document_lengths[document] = document_lengths[document] / average_length
# Now document_lengths stores a normalised length for each document.
return stemming, term_vectors, document_lengths
def process_single_query(query):
""" Process single line text.
Used by `process_queries` function and `manual` mode.
"""
def add_new_word(word):
# A helper function to add a new word in `query_terms`.
if word not in stemming:
stemming[word] = stemmer.stem(word)
stemmed_word = stemming[word]
if stemmed_word not in query_terms:
query_terms.append(stemmed_word)
query_terms = []
query = query.strip()
query = query.translate(removing_punctuation_map)
query = query.replace("--", " ")
for term in query.split():
term = term.replace(".", "").lower()
compound = term.replace("-", "")
if is_valid(compound):
add_new_word(compound)
term_split = term.split("-")
if len(term_split) > 1:
for element in term_split:
if is_valid(element):
add_new_word(element)
return query_terms
def process_queries():
with open(QUERY_PATH, "r") as fp:
query_list = {}
query = []
query_ID = 0
for line in fp:
current_section = line[0 : 2]
if current_section in LABELS:
if current_section == ID:
query_list[query_ID] = query
query = []
query_ID += 1
                    # Ignore original query IDs, i.e. the numbers that follow
                    # ".I", since they are not consecutive.
if current_section == WORDS:
section = current_section
continue
elif section in CONTENTS:
if query == []:
query = process_single_query(line)
else:
query += process_single_query(line)
query_list[query_ID] = query # Add the last entry.
del query_list[0] # Skip the first one.
return query_list
def bm25_similarities(query):
""" It returns a descending list with at most top `MOST_RELEVANT` pairs
(Document ID, Similarity) based on BM25 to calculate similarities.
"""
similarities = []
for document_ID in range(1, nums_of_documents + 1):
# Document ID begins from 1.
similarity = 0.0
for term in query:
if term in term_vectors and document_ID in term_vectors[term]:
frequency = (term_vectors[term])[document_ID]
n_i = len(term_vectors[term])
idf = math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2)
similarity += frequency * (1.0 + K) / (frequency + K * ((1.0 - B) + B * document_lengths[document_ID])) * idf
if similarity > 0.0: # Ignore the one with similarity score 0.
pair = (document_ID, similarity)
similarities.append(pair)
    # Sort results in descending order.
similarities = sorted(similarities, key = lambda x : x[1], reverse = True)
if len(similarities) > MOST_RELEVANT:
return similarities[0 : MOST_RELEVANT]
else:
return similarities
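# Worked example of the per-term weight above (illustrative numbers): with
# 1400 documents, a term occurring in n_i = 100 of them gets
#   idf = log2((1400 - 100 + 0.5) / (100 + 0.5)) ~= 3.69
# and, for a document where it appears twice and whose normalised length is 1.0,
#   weight = 2 * (1 + K) / (2 + K * ((1 - B) + B * 1.0)) * idf = (4 / 3) * 3.69 ~= 4.9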
def manual_mode():
""" When in `manual` mode, the function will not end until user types "QUIT".
"""
while True:
print(DELIMITER_SYMBOL * BOUNDARY_LENGTH)
# Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default
# width of terminal window.
user_query = input("Enter query (type \"QUIT\" to terminate): ")
if user_query == USER_STOP_WORD:
break
query_terms = process_single_query(user_query)
print("Results for query " + str(query_terms))
print("Rank\tID\tScore")
rank = 1
for result in bm25_similarities(query_terms):
print("{0}\t{1}\t{2}".format(str(rank), result[0], str(result[1])), end = "\n")
rank += 1
def load_relevance_scores():
relevance_scores = {}
# `relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}
with open(RELEVANCE_PATH, "r") as fp:
for line in fp:
fields = line.split()
query_ID = int(fields[0])
pair = (int(fields[1]), int(fields[2]))
if query_ID in relevance_scores:
relevance_scores[query_ID].append(pair)
# It assumes no repetition of document IDs for each query.
else:
relevance_scores[query_ID] = [pair]
for query_ID in relevance_scores:
# Sort pairs in ascending order for each query; the less the relevance
# score is, the more relevant the document is.
relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1])
return relevance_scores
def make_query_results():
""" It returns possible relevant documents for each query based on BM25 model.
"""
query_list = process_queries()
query_results = {}
# `query_results` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}, which is exactly the same structure and length as `relevance_scores`.
for query_ID in query_list:
rank = 1
query_results[query_ID] = []
for pair in bm25_similarities(query_list[query_ID]):
query_results[query_ID].append((pair[0], rank))
rank += 1
return query_results
def make_relevance_set(query_ID): # Relevant documents (Rel).
relevance_set = set()
for pair in relevance_scores[query_ID]:
if pair[1] <= RELEVANCE_SCORE_THRESHOLD:
# We only include queries whose relevance scores are less than or equal
# to `RELEVANCE_SCORE_THRESHOLD` here.
relevance_set.add(pair[0])
return relevance_set
def make_retrieval_set(query_ID): # Retrieval documents (Ret).
retrieval_set = set()
for pair in query_results[query_ID]:
retrieval_set.add(pair[0])
return retrieval_set
def precision():
""" It calculates arithmetic mean of precisions for all queries.
"""
precision = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
retrieval_set = make_retrieval_set(query_ID)
appearance_times = 0
for document_ID in retrieval_set:
if document_ID in relevance_set:
appearance_times += 1
precision += appearance_times / len(retrieval_set)
precision = precision / len(query_results)
return precision
def recall():
""" It calculates arithmetic mean of recalls for all queries.
"""
recall = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
retrieval_set = make_retrieval_set(query_ID)
appearance_times = 0
for document_ID in relevance_set:
if document_ID in retrieval_set:
appearance_times += 1
recall += appearance_times / len(relevance_set)
recall = recall / len(query_results)
return recall
def p_at_n(n):
""" It calculates arithmetic mean of precisions at N for all queries.
"""
p_at_n = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
appearance_times = 0
for pair in query_results[query_ID]:
if pair[0] in relevance_set and pair[1] <= n:
appearance_times += 1
p_at_n += appearance_times / n
p_at_n = p_at_n / len(query_results)
return p_at_n
def mean_average_precision():
""" It calculates mean average precision for all queries.
"""
mean_average_precision = 0.0
for query_ID in relevance_scores:
relevance_set = make_relevance_set(query_ID)
appearance_times = 0
current_map = 0.0
for pair in query_results[query_ID]:
if pair[0] in relevance_set:
appearance_times += 1
current_map += appearance_times / pair[1]
mean_average_precision += current_map / len(relevance_set)
mean_average_precision = mean_average_precision / len(query_results)
return mean_average_precision
def ndcg_at_n(n):
""" It yields a list of NDCGs at up to N of each query separately.
"""
for query_ID, score_list in relevance_scores.items():
relevance_set = make_relevance_set(query_ID)
score_list_dict = dict(score_list)
        # Convert a list of pairs to a dictionary for convenience.
# Step one: gain vector.
gain_vector = []
for pair in query_results[query_ID]:
if pair[0] in relevance_set:
gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]])
# Convert original ranking scores to NDCG-usable scores.
else:
gain_vector.append(0)
# Step two: DCG (Discounted Cumulated Gain).
dcg = [gain_vector[0]]
# Put the first item in `dcg`.
for i in range(1, len(gain_vector)):
dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1])
# Step three: IDCG (Ideal Discounted Cumulated Gain).
ideal_gain_vector = []
for pair in score_list:
ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]])
idcg = [ideal_gain_vector[0]]
for i in range(1, len(ideal_gain_vector)):
idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1])
# Step four: NDCG (Normalised Discounted Cumulated Gain) at N.
ndcg_at_n = []
for pair in zip(dcg, idcg):
ndcg_at_n.append(pair[0] / pair[1])
if len(ndcg_at_n) > n:
# And finally, yield at most `n` results for each query.
yield query_ID, ndcg_at_n[0 : n]
else:
yield query_ID, ndcg_at_n
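# The cumulated-gain recurrence implemented above is, in 1-based positions p,
#   DCG[1] = G[1],   DCG[p] = G[p] / log2(p) + DCG[p - 1]   for p >= 2,
# IDCG is the same recurrence over the ideal (relevance-sorted) gains, and
# NDCG@N is the element-wise ratio DCG[p] / IDCG[p] truncated to N entries.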
def print_evaluation_results():
print("Evaluation Results:")
print("Precision: {0}".format(precision()), end = "\n")
print("Recall: {0}".format(recall()), end = "\n")
print("P@{0}: {1}".format(N, p_at_n(N)), end = "\n")
print("Mean Average Precision: {0}".format(mean_average_precision()), end = "\n")
for query_ID, ndcg in ndcg_at_n(N):
print("NDCG@{0} <Query {1}>: {2}".format(N, query_ID, ndcg), end = "\n")
if __name__ == "__main__":
stemmer = porter.PorterStemmer()
stop_words = load_stop_words()
punctuation = string.punctuation[0 : 12] + string.punctuation[14:]
removing_punctuation_map = dict((ord(character), " ") for character in punctuation)
# Remove all punctuations except full stops and hyphens.
args = get_arguments()
if os.path.exists(INDEX_PATH):
print("[Loading BM25 index from file.]")
with open(INDEX_PATH, "r") as fp:
stemming, term_vectors, document_lengths = json.load(fp)
# Warning: unlike Python, `dict` type in JSON cannot have `int` key,
# therefore a conversion is of necessity.
document_lengths = {int(ID) : length for ID, length in document_lengths.items()}
for term, vector in term_vectors.items():
term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in vector.items()}
nums_of_documents = len(document_lengths)
# It is used in `bm25_similarities()` function.
if args.m == "manual":
manual_mode()
elif args.m == "evaluation":
relevance_scores = load_relevance_scores()
query_results = make_query_results()
print_evaluation_results()
if args.o is not None: # If `-o` option is available.
with open(args.o, "w") as fp:
for query_ID, pair_list in query_results.items():
for pair in pair_list:
fp.write("{0} {1} {2}\n".format(query_ID, pair[0], pair[1]))
else:
# For first-time running, it creates an index JSON file and exit.
print("[Generating the index file.]")
with open(INDEX_PATH, "w") as fp:
json.dump(process_documents(), fp)
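# Typical invocations (derived from the argparse setup above):
#   python3 bm25.py                       # first run: only build index.json, then exit
#   python3 bm25.py -m manual             # interactive queries; type QUIT to leave
#   python3 bm25.py -m evaluation -o evaluation_output.txt
#                                         # print metrics and dump (query ID, document ID, rank) lines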
|
[
"json.load",
"porter.PorterStemmer",
"argparse.ArgumentParser",
"math.sqrt",
"os.path.exists",
"math.log"
] |
[((2791, 3065), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A script used to build BM25 model and relative evaluation methods. If the index JSON file is not available, just type `python3 bm25.py` to generate one in the working directory and extra arguments will be ignored in this case"""'}), "(description=\n 'A script used to build BM25 model and relative evaluation methods. If the index JSON file is not available, just type `python3 bm25.py` to generate one in the working directory and extra arguments will be ignored in this case'\n )\n", (2814, 3065), False, 'import argparse\n'), ((7480, 7497), 'math.sqrt', 'math.sqrt', (['length'], {}), '(length)\n', (7489, 7497), False, 'import math\n'), ((16711, 16733), 'porter.PorterStemmer', 'porter.PorterStemmer', ([], {}), '()\n', (16731, 16733), False, 'import porter\n'), ((17006, 17032), 'os.path.exists', 'os.path.exists', (['INDEX_PATH'], {}), '(INDEX_PATH)\n', (17020, 17032), False, 'import os\n'), ((17159, 17172), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (17168, 17172), False, 'import json\n'), ((9959, 10017), 'math.log', 'math.log', (['((nums_of_documents - n_i + 0.5) / (n_i + 0.5))', '(2)'], {}), '((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2)\n', (9967, 10017), False, 'import math\n'), ((4776, 4793), 'math.sqrt', 'math.sqrt', (['length'], {}), '(length)\n', (4785, 4793), False, 'import math\n'), ((15583, 15601), 'math.log', 'math.log', (['(i + 1)', '(2)'], {}), '(i + 1, 2)\n', (15591, 15601), False, 'import math\n'), ((15912, 15930), 'math.log', 'math.log', (['(i + 1)', '(2)'], {}), '(i + 1, 2)\n', (15920, 15930), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-14 19:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("variants", "0012_auto_20181114_1914")]
operations = [
migrations.AddField(
model_name="smallvariantflags",
name="flag_summary",
field=models.CharField(
choices=[
("positive", "positive"),
("uncertain", "uncertain"),
("negative", "negative"),
("empty", "empty"),
],
default="empty",
max_length=32,
),
)
]
|
[
"django.db.models.CharField"
] |
[((403, 570), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('positive', 'positive'), ('uncertain', 'uncertain'), ('negative',\n 'negative'), ('empty', 'empty')]", 'default': '"""empty"""', 'max_length': '(32)'}), "(choices=[('positive', 'positive'), ('uncertain',\n 'uncertain'), ('negative', 'negative'), ('empty', 'empty')], default=\n 'empty', max_length=32)\n", (419, 570), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
import logging
from pecan import expose, request # noqa
from sqlalchemy.exc import IntegrityError
from cloudrunner_server.api.decorators import wrap_command
from cloudrunner_server.api.model import Group, Org, Role
from cloudrunner_server.api.policy.decorators import check_policy
from cloudrunner_server.api.util import JsonOutput as O
LOG = logging.getLogger()
class Groups(object):
@expose('json', generic=True)
@check_policy('is_admin')
@wrap_command(Group)
def groups(self, name=None, *args):
def modifier(roles):
return [dict(as_user=role.as_user, servers=role.servers)
for role in roles]
if name:
group = Group.visible(request).filter(Group.name == name).first()
return O.group(group.serialize(
skip=['id', 'org_id'],
rel=[('roles', 'roles', modifier)]))
else:
groups = [u.serialize(
skip=['id', 'org_id'],
rel=[('roles', 'roles', modifier)])
for u in Group.visible(request).all()]
return O._anon(groups=groups,
quota=dict(allowed=request.user.tier.groups))
@groups.when(method='POST', template='json')
@check_policy('is_admin')
@groups.wrap_create()
def add_group(self, name, *args, **kwargs):
name = name or kwargs['name']
org = request.db.query(Org).filter(
Org.name == request.user.org).one()
group = Group(name=name, org=org)
request.db.add(group)
request.db.commit()
@groups.when(method='PUT', template='json')
@check_policy('is_admin')
@groups.wrap_modify()
def modify_group_roles(self, name, *args, **kwargs):
name = name or kwargs['name']
add_roles = request.POST.getall('add')
rm_roles = request.POST.getall('remove')
group = Group.visible(request).filter(Group.name == name).first()
if not group:
return O.error(msg="Group is not available")
for role in rm_roles:
as_user, _, servers = role.rpartition("@")
if not as_user or not servers:
continue
if as_user == "*":
as_user = "@"
roles = [r for r in group.roles if r.as_user == as_user and
r.servers == servers]
for r in roles:
request.db.delete(r)
request.db.commit()
errs = []
for role in add_roles:
as_user, _, servers = role.rpartition("@")
if not Role.is_valid(as_user):
errs.append(as_user)
if errs:
if len(errs) == 1:
return O.error(msg="The role '%s' is not valid" % errs[0])
else:
return O.error(msg="The following roles are not valid: %s" %
", ".join(errs))
for role in add_roles:
as_user, _, servers = role.rpartition("@")
if not as_user or not servers:
continue
if as_user == "*":
as_user = "@"
r = Role(as_user=as_user, servers=servers, group=group)
try:
request.db.add(r)
request.db.commit()
except IntegrityError:
request.db.rollback()
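    # Role strings handled above have the form "<as_user>@<servers>", e.g.
    # "admin@web*" grants running as user "admin" on servers matching "web*";
    # a "*" user is stored as the wildcard "@" before being saved.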
@groups.when(method='DELETE', template='json')
@check_policy('is_admin')
@groups.wrap_delete()
def rm_group(self, name, *args):
group = Group.visible(request).filter(Group.name == name).first()
if not group:
return O.error(msg="Group not found")
request.db.delete(group)
request.db.commit()
|
[
"pecan.request.db.rollback",
"cloudrunner_server.api.decorators.wrap_command",
"pecan.request.db.query",
"cloudrunner_server.api.policy.decorators.check_policy",
"pecan.request.db.commit",
"pecan.expose",
"pecan.request.db.delete",
"pecan.request.db.add",
"cloudrunner_server.api.model.Role",
"cloudrunner_server.api.model.Group",
"pecan.request.POST.getall",
"cloudrunner_server.api.model.Group.visible",
"cloudrunner_server.api.model.Role.is_valid",
"cloudrunner_server.api.util.JsonOutput.error",
"logging.getLogger"
] |
[((812, 831), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (829, 831), False, 'import logging\n'), ((862, 890), 'pecan.expose', 'expose', (['"""json"""'], {'generic': '(True)'}), "('json', generic=True)\n", (868, 890), False, 'from pecan import expose, request\n'), ((896, 920), 'cloudrunner_server.api.policy.decorators.check_policy', 'check_policy', (['"""is_admin"""'], {}), "('is_admin')\n", (908, 920), False, 'from cloudrunner_server.api.policy.decorators import check_policy\n'), ((926, 945), 'cloudrunner_server.api.decorators.wrap_command', 'wrap_command', (['Group'], {}), '(Group)\n', (938, 945), False, 'from cloudrunner_server.api.decorators import wrap_command\n'), ((1719, 1743), 'cloudrunner_server.api.policy.decorators.check_policy', 'check_policy', (['"""is_admin"""'], {}), "('is_admin')\n", (1731, 1743), False, 'from cloudrunner_server.api.policy.decorators import check_policy\n'), ((2102, 2126), 'cloudrunner_server.api.policy.decorators.check_policy', 'check_policy', (['"""is_admin"""'], {}), "('is_admin')\n", (2114, 2126), False, 'from cloudrunner_server.api.policy.decorators import check_policy\n'), ((3872, 3896), 'cloudrunner_server.api.policy.decorators.check_policy', 'check_policy', (['"""is_admin"""'], {}), "('is_admin')\n", (3884, 3896), False, 'from cloudrunner_server.api.policy.decorators import check_policy\n'), ((1964, 1989), 'cloudrunner_server.api.model.Group', 'Group', ([], {'name': 'name', 'org': 'org'}), '(name=name, org=org)\n', (1969, 1989), False, 'from cloudrunner_server.api.model import Group, Org, Role\n'), ((1998, 2019), 'pecan.request.db.add', 'request.db.add', (['group'], {}), '(group)\n', (2012, 2019), False, 'from pecan import expose, request\n'), ((2028, 2047), 'pecan.request.db.commit', 'request.db.commit', ([], {}), '()\n', (2045, 2047), False, 'from pecan import expose, request\n'), ((2268, 2294), 'pecan.request.POST.getall', 'request.POST.getall', (['"""add"""'], {}), "('add')\n", (2287, 2294), False, 'from pecan import expose, request\n'), ((2314, 2343), 'pecan.request.POST.getall', 'request.POST.getall', (['"""remove"""'], {}), "('remove')\n", (2333, 2343), False, 'from pecan import expose, request\n'), ((2900, 2919), 'pecan.request.db.commit', 'request.db.commit', ([], {}), '()\n', (2917, 2919), False, 'from pecan import expose, request\n'), ((4115, 4139), 'pecan.request.db.delete', 'request.db.delete', (['group'], {}), '(group)\n', (4132, 4139), False, 'from pecan import expose, request\n'), ((4148, 4167), 'pecan.request.db.commit', 'request.db.commit', ([], {}), '()\n', (4165, 4167), False, 'from pecan import expose, request\n'), ((2459, 2496), 'cloudrunner_server.api.util.JsonOutput.error', 'O.error', ([], {'msg': '"""Group is not available"""'}), "(msg='Group is not available')\n", (2466, 2496), True, 'from cloudrunner_server.api.util import JsonOutput as O\n'), ((3603, 3654), 'cloudrunner_server.api.model.Role', 'Role', ([], {'as_user': 'as_user', 'servers': 'servers', 'group': 'group'}), '(as_user=as_user, servers=servers, group=group)\n', (3607, 3654), False, 'from cloudrunner_server.api.model import Group, Org, Role\n'), ((4075, 4105), 'cloudrunner_server.api.util.JsonOutput.error', 'O.error', ([], {'msg': '"""Group not found"""'}), "(msg='Group not found')\n", (4082, 4105), True, 'from cloudrunner_server.api.util import JsonOutput as O\n'), ((2871, 2891), 'pecan.request.db.delete', 'request.db.delete', (['r'], {}), '(r)\n', (2888, 2891), False, 'from pecan import expose, request\n'), ((3044, 3066), 
'cloudrunner_server.api.model.Role.is_valid', 'Role.is_valid', (['as_user'], {}), '(as_user)\n', (3057, 3066), False, 'from cloudrunner_server.api.model import Group, Org, Role\n'), ((3176, 3227), 'cloudrunner_server.api.util.JsonOutput.error', 'O.error', ([], {'msg': '("The role \'%s\' is not valid" % errs[0])'}), '(msg="The role \'%s\' is not valid" % errs[0])\n', (3183, 3227), True, 'from cloudrunner_server.api.util import JsonOutput as O\n'), ((3688, 3705), 'pecan.request.db.add', 'request.db.add', (['r'], {}), '(r)\n', (3702, 3705), False, 'from pecan import expose, request\n'), ((3722, 3741), 'pecan.request.db.commit', 'request.db.commit', ([], {}), '()\n', (3739, 3741), False, 'from pecan import expose, request\n'), ((3793, 3814), 'pecan.request.db.rollback', 'request.db.rollback', ([], {}), '()\n', (3812, 3814), False, 'from pecan import expose, request\n'), ((1870, 1891), 'pecan.request.db.query', 'request.db.query', (['Org'], {}), '(Org)\n', (1886, 1891), False, 'from pecan import expose, request\n'), ((2360, 2382), 'cloudrunner_server.api.model.Group.visible', 'Group.visible', (['request'], {}), '(request)\n', (2373, 2382), False, 'from cloudrunner_server.api.model import Group, Org, Role\n'), ((3976, 3998), 'cloudrunner_server.api.model.Group.visible', 'Group.visible', (['request'], {}), '(request)\n', (3989, 3998), False, 'from cloudrunner_server.api.model import Group, Org, Role\n'), ((1160, 1182), 'cloudrunner_server.api.model.Group.visible', 'Group.visible', (['request'], {}), '(request)\n', (1173, 1182), False, 'from cloudrunner_server.api.model import Group, Org, Role\n'), ((1519, 1541), 'cloudrunner_server.api.model.Group.visible', 'Group.visible', (['request'], {}), '(request)\n', (1532, 1541), False, 'from cloudrunner_server.api.model import Group, Org, Role\n')]
|
"""
A client api for couchdb attachments
"""
"""Main module."""
import logging
import fnmatch
import io
import mimetypes
import os
import pathlib
import re
import tempfile
from contextlib import contextmanager
import requests
logger = logging.getLogger(__file__)
echo = logger.info
class CouchDBClientException(Exception):
def __init__(self, *args, **kwargs):
super(CouchDBClientException, self).__init__(*args, **kwargs)
class URLRequired(CouchDBClientException):
"""A valid URL is required."""
class BadConnectionURI(CouchDBClientException):
"""A valid URL is required."""
class CouchDBClient:
URI_ENVIRON_KEY = 'COUCHDB_URI'
    CONNECTION_RE = r'couchdb(s)?://((\w+)\:(.+)@)?([\w\.]+)(:(\d+))?/(\w+)'
URI_RE = re.compile(CONNECTION_RE)
def __init__(self, uri=None):
if uri is None:
uri = os.environ.get(self.URI_ENVIRON_KEY)
if not uri:
key = self.URI_ENVIRON_KEY
            raise URLRequired(f'You can set the environment variable {key}')
scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri)
if userid and psswd:
self.auth = (userid, psswd)
else:
self.auth = None
self.db = db
self.db_uri = f'{scheme}://{host}{port}/{self.db}'
def check_db(self):
response = requests.head(f"{self.db_uri}", auth=self.auth)
return response.status_code == 200
def create_db(self):
response = requests.put(f"{self.db_uri}", auth=self.auth)
response.raise_for_status()
def save_doc(self, doc):
_id = doc['_id']
doc_uri = f'{self.db_uri}/{_id}'
response = requests.head(doc_uri, auth=self.auth)
if response.status_code == 200:
rev = response.headers['ETag']
headers = {'If-Match': rev[1:-1]}
response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth)
elif response.status_code == 404:
response = requests.post(self.db_uri, json=doc, auth=self.auth)
response.raise_for_status()
def parse_connection_uri(self, uri):
"""
Given:
'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/
:param uri:
:return {host, db, auth, passwd}:
"""
if match := self.URI_RE.match(uri):
(ssl, _, userid, psswd, host, _, port, db) = match.groups()
scheme = 'http' + ('s' if ssl else '')
port = f':{port}' if port else ''
return scheme, userid, psswd, host, port, db
else:
raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}')
def list_attachments(self, *patterns):
regexs = []
for pattern in patterns:
if self.WILDCARD_RE.search(pattern):
regex = re.compile(fnmatch.translate(pattern))
else:
regex = re.compile(fnmatch.translate(pattern)[:-2])
regexs.append(regex)
for file_path, file_size in self.run_view():
if not regexs or any([regex.search(file_path) for regex in regexs]):
yield file_path, file_size
def run_view(self, **args):
params = {'reduce': False, 'include_docs': False}
if 'depth' in args:
params['group_level'] = args['depth']
params['reduce'] = True
response = requests.get(f"{self.db_uri}/_design/couchfs_views/_view/attachment_list", params=params, auth=self.auth)
response.raise_for_status()
for doc in response.json()['rows']:
yield '/'.join(doc['key']), doc['value']
def download(self, src, dst, dry_run=False):
for src, dst in self.download_srcdst(src, dst):
if dry_run:
                yield src, dst, 'DRY RUN', ''
else:
uri = f'{self.db_uri}/{src}'
response = requests.get(uri, auth=self.auth)
yield uri, dst, response.status_code, response.reason
    WILDCARD_RE = re.compile(r'[\*\?\[\]]+')
def download_srcdst(self, src, dst, dry_run=False):
if match := self.WILDCARD_RE.search(src):
regex = re.compile(fnmatch.translate(src))
is_copying_files = True
else:
regex = re.compile(fnmatch.translate(src)[:-2])
sub_regex = re.compile(src)
is_copying_files = False
        for file_path, _ in self.run_view():  # run_view() yields (attachment path, size) pairs
if regex.search(file_path):
if is_copying_files:
match = self.WILDCARD_RE.search(src)
dst_file_path = file_path[match.span()[0]:]
if dst_file_path.startswith('/'):
dst_file_path = file_path[1:]
dest_path = os.path.join(dst, dst_file_path)
else:
dst_file_path = file_path[len(src):]
if file_path.startswith('/'):
dst_file_path = dst_file_path[1:]
dest_path = os.path.join(dst, dst_file_path[1:])
if not dest_path.startswith('dump'):
print('NO DUMP', is_copying_files, dst, file_path[len(src):])
# break
yield file_path, dest_path
def download_file(self, url, dest):
with open(dest, 'wb') as f:
return self.download_to_file(url, f)
def download_to_file(self, url, file_obj):
with requests.get(url, stream=True, auth=self.auth) as r:
r.raise_for_status()
for chunk in r.iter_content(chunk_size=8192):
if chunk:
file_obj.write(chunk)
@contextmanager
def get_attachment(self, url, in_memory=False):
try:
if in_memory:
bytes_fp = io.BytesIO()
self.download_to_file(url, bytes_fp)
yield bytes_fp.getvalue()
else:
fp = tempfile.NamedTemporaryFile(delete=False)
self.download_to_file(url, fp)
fp.close()
yield open(fp.name, 'rb')
finally:
if in_memory:
bytes_fp.close()
else:
os.unlink(fp.name)
def get_attachment_as_bytes(self, url):
return requests.get(url, stream=True, auth=self.auth).content
def upload(self, src, dst, dry_run=False):
src = os.path.abspath(src)
if os.path.isfile(src):
if dry_run:
yield src, dst, 'DRY RUN', ''
else:
with open(src, 'rb') as src_fp:
yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src)))
elif os.path.isdir(src):
p = pathlib.Path(src).resolve()
for (dirpath, dirs, files) in os.walk(src):
for filename in files:
file_path = os.path.join(dirpath, filename)
pp = file_path[len(p.parent.as_posix()) + 1:]
dest_path = os.path.join(dst, pp)
if dry_run:
yield file_path, dest_path, 'DRY RUN', ''
else:
                        # upload_file expects an open file object, so open the path first
                        with open(file_path, 'rb') as src_fp:
                            yield self.upload_file(src_fp, dest_path)
def upload_bytes_file(self, src_bytes, dst):
with tempfile.NamedTemporaryFile() as src_fp:
src_fp.name = os.path.basename(dst)
            src_fp.write(src_bytes)
            src_fp.seek(0)  # rewind so upload_file streams the bytes just written
return self.upload_file(src_fp, dst)
def upload_file(self, src, dst):
"""
Uploads a file using dst as the doc/bucket id
:param src: path to file to upload
:param dst: id
:return: file_name, file_url, upload status, upload message
"""
doc_id = [segment for segment in dst.split('/') if segment][0]
file_name = '/'.join(dst.split('/')[1:])
doc_uri = f'{self.db_uri}/{doc_id}'
file_uri = f'{doc_uri}/{file_name}'
response = requests.head(f'{doc_uri}', auth=self.auth)
if response.status_code == 404:
response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth)
if response.status_code != 201:
return file_name, f'{file_uri}', response.status_code, response.reason
rev = response.json()['rev']
else:
rev = response.headers['ETag']
major, _ = mimetypes.guess_type(src.name)
headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]}
response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth)
response.raise_for_status()
return file_name, f'{file_uri}', response.status_code, response.reason
@classmethod
def init_db(cls, logger=echo):
echo('connecting to couchdb')
client = cls()
logger('checking the db')
if not client.check_db():
logger('creating the db')
client.create_db()
_id = client.COUCHFS_VIEWS['_id']
logger(f'creating or updating the db {_id}')
client.save_doc(client.COUCHFS_VIEWS)
logger(f'db is now setup')
COUCHFS_VIEWS={
"_id": "_design/couchfs_views",
"views": {
"attachment_list": {
"map": "function (doc) {\n if (doc._attachments) {\n for (const file_name in doc._attachments) {\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\n }\n} else {\n emit(doc._id.split('/'), 0)\n}\n}",
"reduce": "_stats"
}
},
"language": "javascript"
}
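# Minimal usage sketch (illustrative; assumes a reachable CouchDB and the
# COUCHDB_URI environment variable, e.g. couchdb://admin:secret@127.0.0.1:5984/mydb).
if __name__ == '__main__':
    CouchDBClient.init_db()  # create the db and the couchfs_views design doc if missing
    client = CouchDBClient()
    for name, url, status, reason in client.upload(__file__, '/examples', dry_run=True):
        print(name, url, status, reason)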
|
[
"os.unlink",
"os.walk",
"os.path.isfile",
"pathlib.Path",
"requests.post",
"os.path.join",
"mimetypes.guess_type",
"os.path.abspath",
"requests.get",
"requests.put",
"io.BytesIO",
"requests.head",
"os.path.basename",
"fnmatch.translate",
"re.compile",
"tempfile.NamedTemporaryFile",
"os.path.isdir",
"os.environ.get",
"logging.getLogger"
] |
[((240, 267), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (257, 267), False, 'import logging\n'), ((753, 778), 're.compile', 're.compile', (['CONNECTION_RE'], {}), '(CONNECTION_RE)\n', (763, 778), False, 'import re\n'), ((4038, 4067), 're.compile', 're.compile', (['"""[\\\\*\\\\?\\\\[\\\\]]+"""'], {}), "('[\\\\*\\\\?\\\\[\\\\]]+')\n", (4048, 4067), False, 'import re\n'), ((1341, 1388), 'requests.head', 'requests.head', (['f"""{self.db_uri}"""'], {'auth': 'self.auth'}), "(f'{self.db_uri}', auth=self.auth)\n", (1354, 1388), False, 'import requests\n'), ((1477, 1523), 'requests.put', 'requests.put', (['f"""{self.db_uri}"""'], {'auth': 'self.auth'}), "(f'{self.db_uri}', auth=self.auth)\n", (1489, 1523), False, 'import requests\n'), ((1675, 1713), 'requests.head', 'requests.head', (['doc_uri'], {'auth': 'self.auth'}), '(doc_uri, auth=self.auth)\n', (1688, 1713), False, 'import requests\n'), ((3404, 3513), 'requests.get', 'requests.get', (['f"""{self.db_uri}/_design/couchfs_views/_view/attachment_list"""'], {'params': 'params', 'auth': 'self.auth'}), "(f'{self.db_uri}/_design/couchfs_views/_view/attachment_list',\n params=params, auth=self.auth)\n", (3416, 3513), False, 'import requests\n'), ((6435, 6455), 'os.path.abspath', 'os.path.abspath', (['src'], {}), '(src)\n', (6450, 6455), False, 'import os\n'), ((6467, 6486), 'os.path.isfile', 'os.path.isfile', (['src'], {}), '(src)\n', (6481, 6486), False, 'import os\n'), ((7980, 8023), 'requests.head', 'requests.head', (['f"""{doc_uri}"""'], {'auth': 'self.auth'}), "(f'{doc_uri}', auth=self.auth)\n", (7993, 8023), False, 'import requests\n'), ((8406, 8436), 'mimetypes.guess_type', 'mimetypes.guess_type', (['src.name'], {}), '(src.name)\n', (8426, 8436), False, 'import mimetypes\n'), ((8526, 8596), 'requests.put', 'requests.put', (['f"""{file_uri}"""'], {'data': 'src', 'headers': 'headers', 'auth': 'self.auth'}), "(f'{file_uri}', data=src, headers=headers, auth=self.auth)\n", (8538, 8596), False, 'import requests\n'), ((856, 892), 'os.environ.get', 'os.environ.get', (['self.URI_ENVIRON_KEY'], {}), '(self.URI_ENVIRON_KEY)\n', (870, 892), False, 'import os\n'), ((1866, 1930), 'requests.put', 'requests.put', (['doc_uri'], {'json': 'doc', 'headers': 'headers', 'auth': 'self.auth'}), '(doc_uri, json=doc, headers=headers, auth=self.auth)\n', (1878, 1930), False, 'import requests\n'), ((4360, 4375), 're.compile', 're.compile', (['src'], {}), '(src)\n', (4370, 4375), False, 'import re\n'), ((5473, 5519), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'auth': 'self.auth'}), '(url, stream=True, auth=self.auth)\n', (5485, 5519), False, 'import requests\n'), ((6318, 6364), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'auth': 'self.auth'}), '(url, stream=True, auth=self.auth)\n', (6330, 6364), False, 'import requests\n'), ((6730, 6748), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (6743, 6748), False, 'import os\n'), ((7329, 7358), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7356, 7358), False, 'import tempfile\n'), ((7396, 7417), 'os.path.basename', 'os.path.basename', (['dst'], {}), '(dst)\n', (7412, 7417), False, 'import os\n'), ((1996, 2048), 'requests.post', 'requests.post', (['self.db_uri'], {'json': 'doc', 'auth': 'self.auth'}), '(self.db_uri, json=doc, auth=self.auth)\n', (2009, 2048), False, 'import requests\n'), ((3915, 3948), 'requests.get', 'requests.get', (['uri'], {'auth': 'self.auth'}), '(uri, auth=self.auth)\n', (3927, 3948), 
False, 'import requests\n'), ((4202, 4224), 'fnmatch.translate', 'fnmatch.translate', (['src'], {}), '(src)\n', (4219, 4224), False, 'import fnmatch\n'), ((5824, 5836), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5834, 5836), False, 'import io\n'), ((5971, 6012), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (5998, 6012), False, 'import tempfile\n'), ((6239, 6257), 'os.unlink', 'os.unlink', (['fp.name'], {}), '(fp.name)\n', (6248, 6257), False, 'import os\n'), ((6836, 6848), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (6843, 6848), False, 'import os\n'), ((2856, 2882), 'fnmatch.translate', 'fnmatch.translate', (['pattern'], {}), '(pattern)\n', (2873, 2882), False, 'import fnmatch\n'), ((4307, 4329), 'fnmatch.translate', 'fnmatch.translate', (['src'], {}), '(src)\n', (4324, 4329), False, 'import fnmatch\n'), ((4791, 4823), 'os.path.join', 'os.path.join', (['dst', 'dst_file_path'], {}), '(dst, dst_file_path)\n', (4803, 4823), False, 'import os\n'), ((5043, 5079), 'os.path.join', 'os.path.join', (['dst', 'dst_file_path[1:]'], {}), '(dst, dst_file_path[1:])\n', (5055, 5079), False, 'import os\n'), ((2937, 2963), 'fnmatch.translate', 'fnmatch.translate', (['pattern'], {}), '(pattern)\n', (2954, 2963), False, 'import fnmatch\n'), ((6766, 6783), 'pathlib.Path', 'pathlib.Path', (['src'], {}), '(src)\n', (6778, 6783), False, 'import pathlib\n'), ((6921, 6952), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (6933, 6952), False, 'import os\n'), ((7051, 7072), 'os.path.join', 'os.path.join', (['dst', 'pp'], {}), '(dst, pp)\n', (7063, 7072), False, 'import os\n'), ((6693, 6714), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (6709, 6714), False, 'import os\n')]
|
# http://www.codewars.com/kata/5390bac347d09b7da40006f6/
import string
def to_jaden_case(s):
return string.capwords(s)
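# Example: to_jaden_case("How can mirrors be real if our eyes aren't real")
# returns "How Can Mirrors Be Real If Our Eyes Aren't Real".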
|
[
"string.capwords"
] |
[((107, 125), 'string.capwords', 'string.capwords', (['s'], {}), '(s)\n', (122, 125), False, 'import string\n')]
|
"""
<NAME> 2017
scripts/annotate_variants.py
Use ANNOVAR to first convert a sample into annovar format and then annotate
"""
import os
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--merged', action='store_true',
help='use directory for merged VCFs')
parser.add_argument('-g', '--humanonly', action='store_true',
help='use humanonly directory for merged VCFs')
args = parser.parse_args()
merged = args.merged
humanonly = args.humanonly
if merged:
vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf')
annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs')
annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs')
else:
vcf_file_dir = os.path.join('results', 'gatk_vcfs')
annovar_file_dir = os.path.join('results', 'annovar_vcfs')
annotated_file_dir = os.path.join('results', 'annotated_vcfs')
if humanonly:
human_string = 'humanonly'
vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string)
annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string)
annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string)
annovar_dir = os.path.join('modules', 'annovar')
humandb_dir = os.path.join(annovar_dir, 'humandb/')
convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl')
table_annovar = os.path.join(annovar_dir, 'table_annovar.pl')
conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar)
anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir)
# Convert to annovar format
for vcf_file in os.listdir(vcf_file_dir):
if '.idx' not in vcf_file:
base_name = vcf_file.split('.')[0]
full_vcf_file = os.path.join(vcf_file_dir, vcf_file)
output_vcf_file = os.path.join(annovar_file_dir,
'{}.annovar.vcf'.format(base_name))
if not os.path.isfile(output_vcf_file):
file_command = '{} {} > {}'.format(conv_com, full_vcf_file,
output_vcf_file)
subprocess.call(file_command, shell=True)
# Annotate annovar formatted files with given databases
for annovar_file in os.listdir(annovar_file_dir):
base_name = annovar_file.split('.')[0]
full_annov_file = os.path.join(annovar_file_dir, annovar_file)
annotated_vcf_file = os.path.join(annotated_file_dir,
'{}.annotated'.format(base_name))
if not os.path.isfile(annotated_vcf_file):
file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 ' \
'-out {} -verbose -otherinfo -remove -protocol ' \
'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \
'-operation g,f,f,f -nastring . -csvout ' \
'-polish'.format(table_annovar, full_annov_file,
annotated_vcf_file)
subprocess.call(file_command, shell=True)
|
[
"argparse.ArgumentParser",
"os.path.isfile",
"subprocess.call",
"os.path.join",
"os.listdir"
] |
[((181, 206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (204, 206), False, 'import argparse\n'), ((1209, 1243), 'os.path.join', 'os.path.join', (['"""modules"""', '"""annovar"""'], {}), "('modules', 'annovar')\n", (1221, 1243), False, 'import os\n'), ((1258, 1295), 'os.path.join', 'os.path.join', (['annovar_dir', '"""humandb/"""'], {}), "(annovar_dir, 'humandb/')\n", (1270, 1295), False, 'import os\n'), ((1315, 1362), 'os.path.join', 'os.path.join', (['annovar_dir', '"""convert2annovar.pl"""'], {}), "(annovar_dir, 'convert2annovar.pl')\n", (1327, 1362), False, 'import os\n'), ((1379, 1424), 'os.path.join', 'os.path.join', (['annovar_dir', '"""table_annovar.pl"""'], {}), "(annovar_dir, 'table_annovar.pl')\n", (1391, 1424), False, 'import os\n'), ((1616, 1640), 'os.listdir', 'os.listdir', (['vcf_file_dir'], {}), '(vcf_file_dir)\n', (1626, 1640), False, 'import os\n'), ((2224, 2252), 'os.listdir', 'os.listdir', (['annovar_file_dir'], {}), '(annovar_file_dir)\n', (2234, 2252), False, 'import os\n'), ((561, 605), 'os.path.join', 'os.path.join', (['"""processed"""', '"""gatk_merged_vcf"""'], {}), "('processed', 'gatk_merged_vcf')\n", (573, 605), False, 'import os\n'), ((629, 675), 'os.path.join', 'os.path.join', (['"""results"""', '"""annovar_merged_vcfs"""'], {}), "('results', 'annovar_merged_vcfs')\n", (641, 675), False, 'import os\n'), ((701, 749), 'os.path.join', 'os.path.join', (['"""results"""', '"""annotated_merged_vcfs"""'], {}), "('results', 'annotated_merged_vcfs')\n", (713, 749), False, 'import os\n'), ((775, 811), 'os.path.join', 'os.path.join', (['"""results"""', '"""gatk_vcfs"""'], {}), "('results', 'gatk_vcfs')\n", (787, 811), False, 'import os\n'), ((835, 874), 'os.path.join', 'os.path.join', (['"""results"""', '"""annovar_vcfs"""'], {}), "('results', 'annovar_vcfs')\n", (847, 874), False, 'import os\n'), ((900, 941), 'os.path.join', 'os.path.join', (['"""results"""', '"""annotated_vcfs"""'], {}), "('results', 'annotated_vcfs')\n", (912, 941), False, 'import os\n'), ((2319, 2363), 'os.path.join', 'os.path.join', (['annovar_file_dir', 'annovar_file'], {}), '(annovar_file_dir, annovar_file)\n', (2331, 2363), False, 'import os\n'), ((1740, 1776), 'os.path.join', 'os.path.join', (['vcf_file_dir', 'vcf_file'], {}), '(vcf_file_dir, vcf_file)\n', (1752, 1776), False, 'import os\n'), ((2505, 2539), 'os.path.isfile', 'os.path.isfile', (['annotated_vcf_file'], {}), '(annotated_vcf_file)\n', (2519, 2539), False, 'import os\n'), ((2968, 3009), 'subprocess.call', 'subprocess.call', (['file_command'], {'shell': '(True)'}), '(file_command, shell=True)\n', (2983, 3009), False, 'import subprocess\n'), ((1924, 1955), 'os.path.isfile', 'os.path.isfile', (['output_vcf_file'], {}), '(output_vcf_file)\n', (1938, 1955), False, 'import os\n'), ((2105, 2146), 'subprocess.call', 'subprocess.call', (['file_command'], {'shell': '(True)'}), '(file_command, shell=True)\n', (2120, 2146), False, 'import subprocess\n')]
|
import numpy as np
#TODO:
#1. create a streamlined and replicable gif creation set of functions in this file.
#2. implement these functions into the generation algorithms available.
def convert_2d(index, cols):
return (index // cols, index % cols)
def bounds_check(index, rows, cols):
if index[0] < 0 or index[0] > rows - 1:
return True
if index[1] < 0 or index[1] > cols - 1:
return True
return False
def neighborCheck(grid, curr, rows, cols):
#order: Left, Right, Top, Down
ops = [(0,-1), (0,1), (-1,0), (1,0)]
#short for operations
ret = []
for i in range(4):
#bounds checking
x = curr.index[1] + ops[i][1]
y = curr.index[0] + ops[i][0]
if bounds_check((y,x), rows, cols):
continue
if grid[y][x].visited == False:
if curr.walls[i] != 'X':
ret.append(i)
return ret
def nbr_index(index, dir):
if dir == 'L':
return (index[0], index[1] - 1)
elif dir == 'R':
return (index[0], index[1] + 1)
elif dir == 'T':
return (index[0] - 1, index[1])
return (index[0] + 1, index[1])
def conv_nbr_wall(dir):
if dir == 'L':
return 1
elif dir == 'R':
return 0
elif dir == 'T':
return 2
return 3
def conv_idx_dir(index, nbr_index):
y = index[0] - nbr_index[0]
x = index[1] - nbr_index[1]
if x == 1:
return 'R'
if x == -1:
return 'L'
if y == 1:
return 'T'
if y == -1:
return 'D'
def print_grid(grid):
for i in range(len(grid)):
print("[", end="")
for j in range(len(grid[i])):
print(grid[i][j].walls, end=", ")
print("]")
def print_index(grid):
for i in range(len(grid)):
print("[", end="")
for j in range(len(grid[i])):
print(grid[i][j].index, end=", ")
print("]")
def print_visited(grid):
for i in range(len(grid)):
print("[", end="")
for j in range(len(grid[i])):
if grid[i][j].visited == True:
print('X', end=", ")
else:
print('O', end=", ")
print("]")
def maze_index(index, dir):
if dir == 0:
return (index[0], index[1] - 1)
elif dir == 1:
return (index[0], index[1] + 1)
elif dir == 2:
return (index[0] - 1, index[1])
return (index[0] + 1, index[1])
def create_snapshot(new_image, index, direction, color=None):
# set marking color to 255 (white) if none provided
if color == None:
color = 255
# assign the given color to the cell to mark it as active
new_image[index[0], index[1]] = color
if direction < 0:
return new_image
# find the index of the wall to break remove
mark_as_white = maze_index(index, direction)
# remove the wall (set it to the provided color)
new_image[mark_as_white[0], mark_as_white[1]] = color
return new_image
def grid_to_image(index):
return (index[0] * 2 + 1, index[1] * 2 + 1)
def mark_change(idx, gif_arr, wall_idx, secondIdx = None, color = None):
# mark one or two changes, algorithm specific
if secondIdx == None:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)
else:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)
newIMG = create_snapshot(newIMG, secondIdx, -1, color)
if not np.array_equal(newIMG, gif_arr[-1]):
gif_arr.append(newIMG)
def mark_node(idx, gif_arr, secondIdx = None, color = None):
if secondIdx == None:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)
else:
newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)
newIMG = create_snapshot(newIMG, secondIdx, -1, color)
if not np.array_equal(newIMG, gif_arr[-1]):
gif_arr.append(newIMG)
def getNeighbor(grid, curr, rows, cols, previous):
#order: Left, Right, Top, Down
ops = [(0,-1), (0,1), (-1,0), (1,0)]
#short for operations
ret = []
for i in range(4):
#bounds checking
x = curr.index[1] + ops[i][1]
y = curr.index[0] + ops[i][0]
if bounds_check((y,x), rows, cols) or (y,x) == previous.index:
continue
ret.append(grid[y][x])
return ret
def print_maze(grid):
maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1))
maze[:,:] = '@'
for i in range(len(grid)):
for j in range(len(grid[i])):
for k in range(4):
idx = maze_index((i * 2 + 1,j * 2 + 1), k)
maze[i * 2 + 1, j * 2 + 1] = '+'
if grid[i][j].walls[k] == 'X':
if k == 0 or k == 1:
maze[idx[0], idx[1]] = '-'
else:
maze[idx[0], idx[1]] = '|'
for i in range(maze.shape[0]):
for j in range(maze.shape[1]):
print(maze[i,j].decode('utf-8'), end=" ")
print()
def countNeighbors(grid, index, rows, cols):
#order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
#short for operations
count = 0
for i in range(8):
#bounds checking
x = index[1] + ops[i][1]
y = index[0] + ops[i][0]
if bounds_check((y,x), rows, cols):
continue
if grid[y,x] == 255:
count += 1
return count
def checkRules(grid, index, rule):
c = countNeighbors(grid, index, grid.shape[0], grid.shape[1])
for character in rule:
if c == int(character):
return True
return False
def start_cells(grid, y, x, random, visited, unvisited):
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
dirs = random.sample(ops, k=len(ops))
count = 0
for index in dirs:
if count == len(dirs):
break
if not bounds_check((y + index[0], x + index[1]), grid.shape[0], grid.shape[1]):
if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:
continue
grid[y + index[0], x + index[1]] = 255
visited.add((y + index[0], x + index[1]))
update_set(y + index[0], x + index[1], visited, grid, unvisited)
count += 1
if count == 0:
return False
return True
def check_visited(y, x, visited):
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
for index in ops:
if (y + index[0], x + index[1]) in visited:
return True
return False
def update_set(y, x, all_nodes, grid, unvisited):
ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]
for index in ops:
if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:
continue
all_nodes.add((y,x))
if (y,x) in unvisited:
unvisited.remove((y,x))
|
[
"numpy.array_equal"
] |
[((3427, 3462), 'numpy.array_equal', 'np.array_equal', (['newIMG', 'gif_arr[-1]'], {}), '(newIMG, gif_arr[-1])\n', (3441, 3462), True, 'import numpy as np\n'), ((3805, 3840), 'numpy.array_equal', 'np.array_equal', (['newIMG', 'gif_arr[-1]'], {}), '(newIMG, gif_arr[-1])\n', (3819, 3840), True, 'import numpy as np\n')]
|
#use python3
import sys
def getNthFib(n):
if n == 2:
return 1
elif n == 1:
return 0
previous = 0
current = 1
for _ in range(n-2):
previous, current = current, previous + current
return current
if __name__ == '__main__':
# Expected only one integer as input
input = sys.stdin.read()
n = int(input)
print(getNthFib(n))
|
[
"sys.stdin.read"
] |
[((346, 362), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (360, 362), False, 'import sys\n')]
|
from os.path import join
import yaml
class Story:
def __init__(self, path):
self.path = path
self.author = None
self.title = None
self.scene = None
self.synopsis = None
def load(self):
f = open(self.filename(), 'r')
text = f.read()
f.close()
data = yaml.load(text)
if data == None:
return False
else:
self.__dict__ = data
return True
def filename(self):
return join(self.path, "story.yaml")
class Scene():
def __init__(self, path, state):
scene_file = state["current_scene"]
self.load(path, scene_file)
def load(self, path, scene_file):
f = open(self.filename(path, scene_file), 'r')
text = f.read()
f.close()
data = yaml.load(text)
if data == None:
return False
else:
self.__dict__ = data
self.__dict__["scene_map"] = self.__dict__["scene_map"].strip("\n")
return True
def filename(self, path, scene_file):
return join(path, "{0}.yaml".format(scene_file))
def disassemble_map(self):
rows = list(reversed(self.scene_map.split("\n")))
disassembled = []
for row in rows:
disassembled.append(list(row))
return disassembled
def reassemble_map(self, grid):
rows = []
for row in grid:
rows.append(''.join(row))
reassembled = "\n".join(list(reversed(rows)))
return reassembled
def build_map(self, state):
x = state["location"]["x"]
y = state["location"]["y"]
grid = self.disassemble_map()
grid[y][x] = "@"
if self.level in state["seen"]:
for item in self.items.values():
grid[item["y"]][item["x"]] = item["char"]
reassembled = self.reassemble_map(grid)
return reassembled
def valid_move(self, location, direction, times):
start_x = location["x"]
start_y = location["y"]
# validate direction and times
if not type(times) is int:
return False
if not type(direction) is str:
return False
if times < 1 or times > 100:
return False
if len(direction) > 1:
return False
if not direction in "nsew":
return False
# find new postion
x = start_x
y = start_y
rows = list(reversed(self.scene_map.split("\n")))
for i in range (0, times):
if direction == "n":
y += 1
elif direction == "s":
y -= 1
elif direction == "e":
x += 1
elif direction == "w":
x -= 1
if len(rows) <= y:
return False
if x < 0 or y < 0:
return False
tiles = list(rows[y])
if len(tiles) <= x:
return False
if tiles[x] != "#":
return False
return True
def view(self, location):
x = location["x"]
y = location["y"]
narration = None
for pview in self.views.values():
if pview["x"] == x and pview["y"] == y:
narration = pview["narration"]
return narration
def look(self, state):
seen = "\n".join(self.items.keys())
if self.level not in state["seen"]:
state["seen"].append(self.level)
return state, seen
def describe(self, state, char):
if self.level not in state["seen"]:
return state, None
items = self.items.values()
item_list = list(filter(lambda x: x["char"] == char, items))
if len(item_list) == 0:
return state, None
return state, item_list[0]["description"]
|
[
"yaml.load",
"os.path.join"
] |
[((333, 348), 'yaml.load', 'yaml.load', (['text'], {}), '(text)\n', (342, 348), False, 'import yaml\n'), ((511, 540), 'os.path.join', 'join', (['self.path', '"""story.yaml"""'], {}), "(self.path, 'story.yaml')\n", (515, 540), False, 'from os.path import join\n'), ((826, 841), 'yaml.load', 'yaml.load', (['text'], {}), '(text)\n', (835, 841), False, 'import yaml\n')]
|
import contextlib
import logging
import logging.config
import random
import time
from pathlib import Path
import hp_transfer_benchmarks # pylint: disable=unused-import
import hp_transfer_optimizers # pylint: disable=unused-import
import hydra
import numpy as np
import yaml
from gitinfo import gitinfo
from hp_transfer_optimizers.core import nameserver as hpns
from hp_transfer_optimizers.core import result as result_utils
from hp_transfer_optimizers.core.worker import Worker
from omegaconf import OmegaConf
from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row
logger = logging.getLogger("hp_transfer_aa_experiments.run")
def _read_reference_losses(args):
reference_losses = None
if args.runtype.type.startswith("eval_reference"):
reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path)
with Path(reference_losses_path).open("r") as stream:
reference_losses = yaml.safe_load(stream)
reference_losses = reference_losses[args.benchmark.name]
reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)]
reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)]
return reference_losses
def _get_trial_parameters(args, reference_losses, step):
if step == 1 and args.runtype.type in ["eval_dim", "eval_reference"]:
trials_per_task = args.runtype.dim_factor_pre_adjustment
else:
trials_per_task = args.runtype.dim_factor
logger.info(f"Using {trials_per_task} trials per task")
if step > 1 and args.runtype.type.startswith("eval_reference"):
trials_until_loss = reference_losses[step][f"{args.runtype.dim_factor}_loss"]
logger.info(
f"Also performing trials until loss {trials_until_loss :.4f}"
f" (max {10 * trials_per_task})"
)
else:
trials_until_loss = None
return trials_per_task, trials_until_loss
def _write_batch_result(args, result_batch):
batch_result_row = get_batch_result_row(
args.benchmark.name,
args.runtype.dim_factor_pre_adjustment,
args.approach.name,
args.benchmark.benchmark.trajectory_id,
args.benchmark.benchmark.adjustment_id,
args.run_id,
result_batch,
)
result_path = Path(
hydra.utils.to_absolute_path("results"),
args.experiment_group,
f"results/{args.experiment_name.replace('/', ',')}.csv",
)
result_path.parent.mkdir(exist_ok=True, parents=True)
with result_path.open("a") as result_stream:
result_stream.write("\t".join([str(value) for value in batch_result_row]) + "\n")
def _run_on_task_batch(
optimizer,
task_batch,
configspace,
step,
result_trajectory,
trials_per_task,
trials_until_loss,
args,
):
do_transfer = args.approach.name.startswith("transfer")
previous_results = result_trajectory if do_transfer else None
result_batch = result_utils.BatchResult(step, configspace)
for task in task_batch:
logger.info(f"Running on task {task.identifier}")
task_result = optimizer.run(
configspace=configspace,
task=task,
n_iterations=trials_per_task,
trials_until_loss=trials_until_loss,
previous_results=previous_results,
)
result_batch.insert(task_result, task)
if step > 1:
_write_batch_result(args, result_batch)
return result_batch
def _train_and_eval(optimizer, benchmark, args):
reference_losses = _read_reference_losses(args)
result_trajectory = result_utils.TrajectoryResult()
for step, (train_batch, configspace) in enumerate(
zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1
):
if args.runtype.type == "reference" and step == 1:
continue
logger.info(f"Step ------- {step :04d}")
trials_per_task, trials_until_loss = _get_trial_parameters(
args, reference_losses, step
)
logger.info(f"Using configspace\n{configspace}".rstrip())
batch_result = _run_on_task_batch(
optimizer,
train_batch,
configspace,
step,
result_trajectory,
trials_per_task,
trials_until_loss,
args,
)
result_trajectory.insert(batch_result)
class _HPOWorker(Worker):
def __init__(self, benchmark, **kwargs):
super().__init__(**kwargs)
# Only read task once
self._benchmark = benchmark
self._previous_task_identifier = None
self._previous_development_stage = None
self._task = None
# pylint: disable=unused-argument
def compute(
self,
config_id,
config,
budget,
working_directory,
*args,
**kwargs,
):
task_identifier = kwargs["task_identifier"]
development_stage = kwargs["development_stage"]
task_changed = (
development_stage != self._previous_development_stage
or self._previous_task_identifier != task_identifier
)
if task_changed: # Only read task once
self._previous_task_identifier = task_identifier
self._previous_development_stage = development_stage
self._task = self._benchmark.get_task_from_identifier(
task_identifier, development_stage
)
if "development_step" in config:
del config["development_step"]
return self._task.evaluate(config)
def _run_worker(args, benchmark, working_directory):
time.sleep(5) # short artificial delay to make sure the nameserver is already running
host = hpns.nic_name_to_host(args.nic_name)
w = _HPOWorker(
benchmark,
run_id=args.run_id,
host=host,
logger=logging.getLogger("worker"),
)
w.load_nameserver_credentials(working_directory=str(working_directory))
w.run(background=False)
def _run_master(args, benchmark, working_directory):
nameserver = hpns.NameServer(
run_id=args.run_id,
working_directory=str(working_directory),
nic_name=args.nic_name,
)
ns_host, ns_port = nameserver.start()
# Start a background worker for the master node
w = _HPOWorker(
benchmark,
run_id=args.run_id,
host=ns_host,
nameserver=ns_host,
nameserver_port=ns_port,
logger=logging.getLogger("worker"),
)
w.run(background=True)
# Create an optimizer
optimizer = hydra.utils.instantiate(
args.approach.approach,
host=ns_host,
nameserver=ns_host,
nameserver_port=ns_port,
logger=logging.getLogger("master"),
)
# Train and evaluate the optimizer
try:
_train_and_eval(optimizer, benchmark, args)
finally:
optimizer.shutdown(shutdown_workers=True)
nameserver.shutdown()
def _set_seeds(seed):
random.seed(seed)
np.random.seed(seed)
# torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.deterministic = True
# torch.manual_seed(seed)
# tf.random.set_seed(seed)
@hydra.main(config_path="configs", config_name="run")
def run(args):
_set_seeds(args.seed)
working_directory = Path().cwd()
# Log general information
logger.info(f"Using working_directory={working_directory}")
with contextlib.suppress(TypeError):
git_info = gitinfo.get_git_info()
logger.info(f"Commit hash: {git_info['commit']}")
logger.info(f"Commit date: {git_info['author_date']}")
logger.info(f"Arguments:\n{OmegaConf.to_yaml(args)}")
# Construct benchmark
if "data_path" in args.benchmark.benchmark:
args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path(
args.benchmark.benchmark.data_path
)
benchmark = hydra.utils.instantiate(args.benchmark.benchmark)
# Actually run
if args.worker_id == 0:
_run_master(args, benchmark, working_directory)
else:
_run_worker(args, benchmark, working_directory)
logger.info(f"Run finished")
if __name__ == "__main__":
run() # pylint: disable=no-value-for-parameter
|
[
"omegaconf.OmegaConf.to_yaml",
"hp_transfer_aa_experiments.analyse.read_results.get_batch_result_row",
"numpy.random.seed",
"hydra.utils.to_absolute_path",
"hydra.utils.instantiate",
"contextlib.suppress",
"time.sleep",
"hp_transfer_optimizers.core.result.TrajectoryResult",
"pathlib.Path",
"random.seed",
"gitinfo.gitinfo.get_git_info",
"hydra.main",
"yaml.safe_load",
"hp_transfer_optimizers.core.nameserver.nic_name_to_host",
"logging.getLogger",
"hp_transfer_optimizers.core.result.BatchResult"
] |
[((608, 659), 'logging.getLogger', 'logging.getLogger', (['"""hp_transfer_aa_experiments.run"""'], {}), "('hp_transfer_aa_experiments.run')\n", (625, 659), False, 'import logging\n'), ((7222, 7274), 'hydra.main', 'hydra.main', ([], {'config_path': '"""configs"""', 'config_name': '"""run"""'}), "(config_path='configs', config_name='run')\n", (7232, 7274), False, 'import hydra\n'), ((2038, 2260), 'hp_transfer_aa_experiments.analyse.read_results.get_batch_result_row', 'get_batch_result_row', (['args.benchmark.name', 'args.runtype.dim_factor_pre_adjustment', 'args.approach.name', 'args.benchmark.benchmark.trajectory_id', 'args.benchmark.benchmark.adjustment_id', 'args.run_id', 'result_batch'], {}), '(args.benchmark.name, args.runtype.\n dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark\n .trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id,\n result_batch)\n', (2058, 2260), False, 'from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row\n'), ((2991, 3034), 'hp_transfer_optimizers.core.result.BatchResult', 'result_utils.BatchResult', (['step', 'configspace'], {}), '(step, configspace)\n', (3015, 3034), True, 'from hp_transfer_optimizers.core import result as result_utils\n'), ((3631, 3662), 'hp_transfer_optimizers.core.result.TrajectoryResult', 'result_utils.TrajectoryResult', ([], {}), '()\n', (3660, 3662), True, 'from hp_transfer_optimizers.core import result as result_utils\n'), ((5665, 5678), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5675, 5678), False, 'import time\n'), ((5763, 5799), 'hp_transfer_optimizers.core.nameserver.nic_name_to_host', 'hpns.nic_name_to_host', (['args.nic_name'], {}), '(args.nic_name)\n', (5784, 5799), True, 'from hp_transfer_optimizers.core import nameserver as hpns\n'), ((7022, 7039), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7033, 7039), False, 'import random\n'), ((7044, 7064), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7058, 7064), True, 'import numpy as np\n'), ((7933, 7982), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['args.benchmark.benchmark'], {}), '(args.benchmark.benchmark)\n', (7956, 7982), False, 'import hydra\n'), ((811, 867), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['args.reference_losses_path'], {}), '(args.reference_losses_path)\n', (839, 867), False, 'import hydra\n'), ((2342, 2381), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['"""results"""'], {}), "('results')\n", (2370, 2381), False, 'import hydra\n'), ((7457, 7487), 'contextlib.suppress', 'contextlib.suppress', (['TypeError'], {}), '(TypeError)\n', (7476, 7487), False, 'import contextlib\n'), ((7508, 7530), 'gitinfo.gitinfo.get_git_info', 'gitinfo.get_git_info', ([], {}), '()\n', (7528, 7530), False, 'from gitinfo import gitinfo\n'), ((7830, 7894), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['args.benchmark.benchmark.data_path'], {}), '(args.benchmark.benchmark.data_path)\n', (7858, 7894), False, 'import hydra\n'), ((961, 983), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (975, 983), False, 'import yaml\n'), ((5901, 5928), 'logging.getLogger', 'logging.getLogger', (['"""worker"""'], {}), "('worker')\n", (5918, 5928), False, 'import logging\n'), ((6505, 6532), 'logging.getLogger', 'logging.getLogger', (['"""worker"""'], {}), "('worker')\n", (6522, 6532), False, 'import logging\n'), ((6765, 6792), 'logging.getLogger', 'logging.getLogger', (['"""master"""'], {}), "('master')\n", 
(6782, 6792), False, 'import logging\n'), ((7340, 7346), 'pathlib.Path', 'Path', ([], {}), '()\n', (7344, 7346), False, 'from pathlib import Path\n'), ((7683, 7706), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['args'], {}), '(args)\n', (7700, 7706), False, 'from omegaconf import OmegaConf\n'), ((881, 908), 'pathlib.Path', 'Path', (['reference_losses_path'], {}), '(reference_losses_path)\n', (885, 908), False, 'from pathlib import Path\n')]
|
from qiniu import Auth, put_data
# Fill in your Access Key and Secret Key
access_key = '<KEY>'
secret_key = '<KEY>'
def image_url(image_data):
    # Build the authentication object
q = Auth(access_key, secret_key)
    # Bucket to upload to
bucket_name = 'new3333'
    # Filename to save as after upload
key = None
    # Handle the upload result
token = q.upload_token(bucket_name, key, 3600)
ret, info = put_data(token, key, image_data)
print(ret)
print(info)
if info.status_code == 200:
return ret.get('key')
else:
return None
if __name__ == '__main__':
with open('./滑稽.jpg', 'rb') as f:
image_data = f.read()
image_url(image_data)
|
[
"qiniu.put_data",
"qiniu.Auth"
] |
[((156, 184), 'qiniu.Auth', 'Auth', (['access_key', 'secret_key'], {}), '(access_key, secret_key)\n', (160, 184), False, 'from qiniu import Auth, put_data\n'), ((338, 370), 'qiniu.put_data', 'put_data', (['token', 'key', 'image_data'], {}), '(token, key, image_data)\n', (346, 370), False, 'from qiniu import Auth, put_data\n')]
|
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabsreq9_detached_award_financial_assistance'
def test_column_headers(database):
expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. """
det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C',
awardee_or_recipient_legal='REDACTED')
det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='',
awardee_or_recipient_legal='Name')
# Test ignoring for D records
det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d',
awardee_or_recipient_legal=None)
det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='')
det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d',
awardee_or_recipient_legal='Name')
errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. """
det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None)
det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None,
awardee_or_recipient_legal='')
errors = number_of_errors(_FILE, database, models=[det_award, det_award_2])
assert errors == 2
|
[
"tests.unit.dataactvalidator.utils.number_of_errors",
"tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory",
"tests.unit.dataactvalidator.utils.query_columns"
] |
[((666, 780), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': '"""C"""', 'awardee_or_recipient_legal': '"""REDACTED"""'}), "(correction_delete_indicatr='C',\n awardee_or_recipient_legal='REDACTED')\n", (705, 780), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((851, 960), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': '""""""', 'awardee_or_recipient_legal': '"""Name"""'}), "(correction_delete_indicatr='',\n awardee_or_recipient_legal='Name')\n", (890, 960), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((1067, 1175), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': '"""d"""', 'awardee_or_recipient_legal': 'None'}), "(correction_delete_indicatr='d',\n awardee_or_recipient_legal=None)\n", (1106, 1175), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((1248, 1354), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': '"""D"""', 'awardee_or_recipient_legal': '""""""'}), "(correction_delete_indicatr='D',\n awardee_or_recipient_legal='')\n", (1287, 1354), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((1369, 1479), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': '"""d"""', 'awardee_or_recipient_legal': '"""Name"""'}), "(correction_delete_indicatr='d',\n awardee_or_recipient_legal='Name')\n", (1408, 1479), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((1548, 1657), 'tests.unit.dataactvalidator.utils.number_of_errors', 'number_of_errors', (['_FILE', 'database'], {'models': '[det_award, det_award_2, det_award_3, det_award_4, det_award_5]'}), '(_FILE, database, models=[det_award, det_award_2,\n det_award_3, det_award_4, det_award_5])\n', (1564, 1657), False, 'from tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n'), ((1835, 1943), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': '"""c"""', 'awardee_or_recipient_legal': 'None'}), "(correction_delete_indicatr='c',\n awardee_or_recipient_legal=None)\n", (1874, 1943), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((1958, 2065), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', ([], {'correction_delete_indicatr': 'None', 'awardee_or_recipient_legal': '""""""'}), "(correction_delete_indicatr=None,\n awardee_or_recipient_legal='')\n", (1997, 2065), False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((2134, 2200), 'tests.unit.dataactvalidator.utils.number_of_errors', 'number_of_errors', (['_FILE', 'database'], {'models': '[det_award, det_award_2]'}), '(_FILE, database, 
models=[det_award, det_award_2])\n', (2150, 2200), False, 'from tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n'), ((444, 474), 'tests.unit.dataactvalidator.utils.query_columns', 'query_columns', (['_FILE', 'database'], {}), '(_FILE, database)\n', (457, 474), False, 'from tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n')]
|
import numpy as np
import pytest
from packaging.utils import Version
import fast_numpy_loops
old_numpy = Version(np.__version__) < Version('1.18')
@pytest.fixture(scope='session')
def initialize_fast_numpy_loops():
fast_numpy_loops.initialize()
@pytest.fixture(scope='function')
def rng():
if old_numpy:
class OldRNG(np.random.RandomState):
pass
rng = OldRNG(1234)
rng.random = rng.random_sample
rng.integers = rng.randint
return rng
else:
return np.random.default_rng(1234)
|
[
"numpy.random.default_rng",
"fast_numpy_loops.initialize",
"pytest.fixture",
"packaging.utils.Version"
] |
[((150, 181), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (164, 181), False, 'import pytest\n'), ((253, 285), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (267, 285), False, 'import pytest\n'), ((106, 129), 'packaging.utils.Version', 'Version', (['np.__version__'], {}), '(np.__version__)\n', (113, 129), False, 'from packaging.utils import Version\n'), ((132, 147), 'packaging.utils.Version', 'Version', (['"""1.18"""'], {}), "('1.18')\n", (139, 147), False, 'from packaging.utils import Version\n'), ((221, 250), 'fast_numpy_loops.initialize', 'fast_numpy_loops.initialize', ([], {}), '()\n', (248, 250), False, 'import fast_numpy_loops\n'), ((522, 549), 'numpy.random.default_rng', 'np.random.default_rng', (['(1234)'], {}), '(1234)\n', (543, 549), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
"""
Bunch of country-related functions to help get extended data.
"""
import json
import requests
# Load country codes file and store into variable
countries_file = open('assets/country-codes.json')
countries = json.load(countries_file)
countries_file.close()
# Load country lat and long file and store into variable
country_latlng_file = open('assets/countrycode-latlong.json')
country_latlng = json.load(country_latlng_file)
country_latlng_file.close()
def get_country_latlng(cc):
""" using country code find lat and long """
cc_key = cc.lower()
lat = country_latlng[cc_key]['lat']
lng = country_latlng[cc_key]['long']
return lat, lng
def get_country_code(country_name):
""" with country name find the country code """
for country in countries:
if country['name'].lower() == country_name.lower():
return country['code'].upper()
def get_country_name(country_code):
""" using the country code, find the name """
for country in countries:
if country['code'].lower() == country_code.lower():
return country['name'].capitalize()
def get_wikipedia_description(search):
""" Using wikipedia api to fetch descriptions """
"""
    Wikipedia's API turned out to be too slow (ingesting the data takes a lot of
    time), so it is disabled for now to keep this up and running quickly.
Descriptions will have to be called via the app (front-end)
"""
disable = True
if disable is False:
wiki_req = requests.get(
'https://en.wikipedia.org/w/api.php'
+ '?format=json'
+ '&action=query'
+ '&prop=extracts'
+ '&exintro='
+ '&explaintext='
+ '&titles={query}'
.format(query=search))
response = wiki_req.json()
pages = response['query']['pages']
description = ""
for value in pages.values():
if 'extract' in value:
description = value['extract']
else:
description = ""
break
else:
description = ""
return description
def insert_countries(db_func):
for country in countries:
record = dict()
record['name'] = country['name']
record['code'] = country['code']
record['lat'], record['lng'] = get_country_latlng(country['code'])
record['description'] = get_wikipedia_description(country['name'])
db_func(record)
def insert_cities(db_func):
with open('assets/cities.json') as cities_file:
cities = json.load(cities_file)
for city in cities:
record = dict()
record['name'] = city['name']
record['code'] = city['country']
record['country'] = get_country_name(city['country'])
record['lat'] = city['lat']
record['lng'] = city['lng']
record['description'] = get_wikipedia_description(city['name'])
db_func(record)
|
[
"json.load"
] |
[((231, 256), 'json.load', 'json.load', (['countries_file'], {}), '(countries_file)\n', (240, 256), False, 'import json\n'), ((417, 447), 'json.load', 'json.load', (['country_latlng_file'], {}), '(country_latlng_file)\n', (426, 447), False, 'import json\n'), ((2644, 2666), 'json.load', 'json.load', (['cities_file'], {}), '(cities_file)\n', (2653, 2666), False, 'import json\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Triplet loss with hard positive/negative mining"""
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor
class MarginRankingLoss(nn.Cell):
"""function MarginRankingLoss"""
def __init__(self, margin=0.0, reduction='mean'):
super(MarginRankingLoss, self).__init__()
self.reduction = reduction
self.margin = margin
self.sum = ops.ReduceSum(keep_dims=False)
def construct(self, input1, input2, target):
output = np.maximum(0, -target*(input1 - input2) + self.margin)
if self.reduction == 'mean':
output = np.mean(output)
elif self.reduction == 'sum':
output = self.sum(output, 0)
return output
class addmm(nn.Cell):
"""function _addmm"""
def construct(self, mat, alpha, beta, mat1, mat2):
out = ops.matmul(mat1, mat2)
return mat * alpha + out * beta
class TripletLoss(nn.Cell):
"""Triplet loss with hard positive/negative mining"""
def __init__(self, batch_size, margin=0.3):
super(TripletLoss, self).__init__()
self.addmm = addmm()
self.pow = ops.Pow()
self.equal = ops.Equal()
self.cast = ops.Cast()
self.select = ops.Select()
self.reducemax = ops.ReduceMax()
self.reducemin = ops.ReduceMin()
self.sum = ops.ReduceSum(keep_dims=True)
self.ranking_loss = MarginRankingLoss(margin=margin)
self.expand = ops.BroadcastTo((batch_size, batch_size))
self.zeros = Tensor(np.zeros((batch_size, batch_size)).astype(np.float32))
self.maxs = Tensor(np.full((batch_size, batch_size), 65535).astype(np.float32))
def construct(self, inputs, targets):
"""TripletLoss construct"""
inputs_ = self.pow(inputs, 2)
inputs_ = self.sum(inputs_, 1)
dist = self.expand(inputs_) # (32, 32)
dist = dist + dist.T
dist = self.addmm(dist, 1, -2, inputs, inputs.T)
dist = np.sqrt(np.clip(dist, xmin=1e-12, xmax=np.amax(dist)), dtype=dist.dtype)
targets = self.cast(targets, mstype.float32)
mask = self.equal(self.expand(targets), self.expand(targets).T)
dist_ap = self.select(mask, dist, self.zeros)
mask_zeros = self.equal(self.cast(mask, mstype.int32), self.zeros)
dist_an = self.select(mask_zeros, dist, self.maxs)
dist_ap = self.reducemax(dist_ap, 1)
dist_an = self.reducemin(dist_an, 1)
y = np.ones_like((dist_an))
loss = self.ranking_loss(dist_an, dist_ap, y)
return loss
|
[
"mindspore.ops.BroadcastTo",
"mindspore.numpy.amax",
"mindspore.ops.Cast",
"mindspore.numpy.full",
"mindspore.ops.ReduceSum",
"mindspore.numpy.mean",
"mindspore.ops.ReduceMax",
"mindspore.ops.Equal",
"mindspore.ops.Pow",
"mindspore.numpy.maximum",
"mindspore.numpy.zeros",
"mindspore.ops.ReduceMin",
"mindspore.ops.Select",
"mindspore.ops.matmul",
"mindspore.numpy.ones_like"
] |
[((1132, 1162), 'mindspore.ops.ReduceSum', 'ops.ReduceSum', ([], {'keep_dims': '(False)'}), '(keep_dims=False)\n', (1145, 1162), True, 'import mindspore.ops as ops\n'), ((1230, 1286), 'mindspore.numpy.maximum', 'np.maximum', (['(0)', '(-target * (input1 - input2) + self.margin)'], {}), '(0, -target * (input1 - input2) + self.margin)\n', (1240, 1286), True, 'import mindspore.numpy as np\n'), ((1579, 1601), 'mindspore.ops.matmul', 'ops.matmul', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (1589, 1601), True, 'import mindspore.ops as ops\n'), ((1870, 1879), 'mindspore.ops.Pow', 'ops.Pow', ([], {}), '()\n', (1877, 1879), True, 'import mindspore.ops as ops\n'), ((1901, 1912), 'mindspore.ops.Equal', 'ops.Equal', ([], {}), '()\n', (1910, 1912), True, 'import mindspore.ops as ops\n'), ((1933, 1943), 'mindspore.ops.Cast', 'ops.Cast', ([], {}), '()\n', (1941, 1943), True, 'import mindspore.ops as ops\n'), ((1966, 1978), 'mindspore.ops.Select', 'ops.Select', ([], {}), '()\n', (1976, 1978), True, 'import mindspore.ops as ops\n'), ((2004, 2019), 'mindspore.ops.ReduceMax', 'ops.ReduceMax', ([], {}), '()\n', (2017, 2019), True, 'import mindspore.ops as ops\n'), ((2045, 2060), 'mindspore.ops.ReduceMin', 'ops.ReduceMin', ([], {}), '()\n', (2058, 2060), True, 'import mindspore.ops as ops\n'), ((2080, 2109), 'mindspore.ops.ReduceSum', 'ops.ReduceSum', ([], {'keep_dims': '(True)'}), '(keep_dims=True)\n', (2093, 2109), True, 'import mindspore.ops as ops\n'), ((2193, 2234), 'mindspore.ops.BroadcastTo', 'ops.BroadcastTo', (['(batch_size, batch_size)'], {}), '((batch_size, batch_size))\n', (2208, 2234), True, 'import mindspore.ops as ops\n'), ((3201, 3222), 'mindspore.numpy.ones_like', 'np.ones_like', (['dist_an'], {}), '(dist_an)\n', (3213, 3222), True, 'import mindspore.numpy as np\n'), ((1343, 1358), 'mindspore.numpy.mean', 'np.mean', (['output'], {}), '(output)\n', (1350, 1358), True, 'import mindspore.numpy as np\n'), ((2263, 2297), 'mindspore.numpy.zeros', 'np.zeros', (['(batch_size, batch_size)'], {}), '((batch_size, batch_size))\n', (2271, 2297), True, 'import mindspore.numpy as np\n'), ((2345, 2385), 'mindspore.numpy.full', 'np.full', (['(batch_size, batch_size)', '(65535)'], {}), '((batch_size, batch_size), 65535)\n', (2352, 2385), True, 'import mindspore.numpy as np\n'), ((2751, 2764), 'mindspore.numpy.amax', 'np.amax', (['dist'], {}), '(dist)\n', (2758, 2764), True, 'import mindspore.numpy as np\n')]
|
from pathlib import Path
from unittest import TestCase
from gains.io import FitbodLoader
class TestFitbodLoader(TestCase):
data_filename = Path(__file__).parent / "data.csv"
def test_loading_data(self):
loader = FitbodLoader(self.data_filename)
analysis = loader.analysis
exercises = analysis.exercises
self.assertEqual(len(exercises), 890)
self.assertEqual(exercises[3].average_weight, 38.5)
|
[
"pathlib.Path",
"gains.io.FitbodLoader"
] |
[((233, 265), 'gains.io.FitbodLoader', 'FitbodLoader', (['self.data_filename'], {}), '(self.data_filename)\n', (245, 265), False, 'from gains.io import FitbodLoader\n'), ((147, 161), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'from pathlib import Path\n')]
|
from django.test import TestCase
from django_enum_choices.forms import EnumChoiceField
from .testapp.enumerations import CharTestEnum
class FormFieldTests(TestCase):
def test_field_instance_creates_choices_correctly(self):
instance = EnumChoiceField(CharTestEnum)
choices = instance.build_choices()
self.assertEqual(
choices,
[('first', 'first'),
('second', 'second'),
('third', 'third')]
)
def test_field_instance_creates_choices_correctly_with_custom_choice_builder(self):
def choice_builder(choice):
return 'Custom_' + choice.value, choice.value
instance = EnumChoiceField(CharTestEnum, choice_builder=choice_builder)
choices = instance.build_choices()
self.assertEqual(
choices,
[('Custom_first', 'first'),
('Custom_second', 'second'),
('Custom_third', 'third')]
)
|
[
"django_enum_choices.forms.EnumChoiceField"
] |
[((250, 279), 'django_enum_choices.forms.EnumChoiceField', 'EnumChoiceField', (['CharTestEnum'], {}), '(CharTestEnum)\n', (265, 279), False, 'from django_enum_choices.forms import EnumChoiceField\n'), ((685, 745), 'django_enum_choices.forms.EnumChoiceField', 'EnumChoiceField', (['CharTestEnum'], {'choice_builder': 'choice_builder'}), '(CharTestEnum, choice_builder=choice_builder)\n', (700, 745), False, 'from django_enum_choices.forms import EnumChoiceField\n')]
|
import xarray as xr
from mapshader.transforms import squeeze
from mapshader.transforms import cast
from mapshader.transforms import orient_array
from mapshader.transforms import flip_coords
from mapshader.transforms import reproject_raster
def run_float(input_file, output_file, chunks=(512, 512),
name='data', scale_factor=0.1, fill_value=-9999):
arr = xr.open_rasterio(input_file)
arr = squeeze(arr, 'band')
arr = cast(arr, dtype='float64')
arr = orient_array(arr)
arr = flip_coords(arr, dim='y') # do we need this?
arr = reproject_raster(arr, epsg=3857)
dataset = xr.Dataset({name: (['y', 'x'], arr.chunk(chunks))},
coords={'x': arr.coords['x'],
'y': arr.coords['y']})
dataset.attrs = dict(name=name)
dataset.to_netcdf(output_file, encoding={'data': {'dtype': 'int16',
'scale_factor': 0.1,
'_FillValue': -9999}})
def run_int(input_file, output_file, chunks=(512, 512),
name='data', fill_value=-9999):
arr = xr.open_rasterio(input_file)
arr = squeeze(arr, 'band')
arr = orient_array(arr)
arr = flip_coords(arr, dim='y') # do we need this?
arr = reproject_raster(arr, epsg=3857)
dataset = xr.Dataset({name: (['y', 'x'], arr.chunk(chunks))},
coords={'x': arr.coords['x'],
'y': arr.coords['y']})
dataset.attrs = dict(name=name)
dataset.to_netcdf(output_file, encoding={'data': {'dtype': 'int16',
'_FillValue': fill_value}})
if __name__ == '__main__':
import sys
from argparse import ArgumentParser
from os import path
parser = ArgumentParser()
parser.add_argument('-i')
parser.add_argument('-o')
parser.add_argument('-f')
parsed = parser.parse_args()
input_file = path.abspath(path.expanduser(parsed.i))
print(f'Converting {input_file} from TIFF to NetCDF File', file=sys.stdout)
if not parsed.o:
output_file = input_file.replace('.tif', '.nc')
else:
output_file = path.abspath(path.expanduser(parsed.o))
if parsed.f:
run_float(input_file, output_file)
else:
run_int(input_file, output_file)
print(f'Conversion Complete: {output_file}', file=sys.stdout)
|
[
"mapshader.transforms.squeeze",
"argparse.ArgumentParser",
"xarray.open_rasterio",
"mapshader.transforms.flip_coords",
"mapshader.transforms.reproject_raster",
"mapshader.transforms.orient_array",
"os.path.expanduser",
"mapshader.transforms.cast"
] |
[((375, 403), 'xarray.open_rasterio', 'xr.open_rasterio', (['input_file'], {}), '(input_file)\n', (391, 403), True, 'import xarray as xr\n'), ((414, 434), 'mapshader.transforms.squeeze', 'squeeze', (['arr', '"""band"""'], {}), "(arr, 'band')\n", (421, 434), False, 'from mapshader.transforms import squeeze\n'), ((445, 471), 'mapshader.transforms.cast', 'cast', (['arr'], {'dtype': '"""float64"""'}), "(arr, dtype='float64')\n", (449, 471), False, 'from mapshader.transforms import cast\n'), ((482, 499), 'mapshader.transforms.orient_array', 'orient_array', (['arr'], {}), '(arr)\n', (494, 499), False, 'from mapshader.transforms import orient_array\n'), ((510, 535), 'mapshader.transforms.flip_coords', 'flip_coords', (['arr'], {'dim': '"""y"""'}), "(arr, dim='y')\n", (521, 535), False, 'from mapshader.transforms import flip_coords\n'), ((566, 598), 'mapshader.transforms.reproject_raster', 'reproject_raster', (['arr'], {'epsg': '(3857)'}), '(arr, epsg=3857)\n', (582, 598), False, 'from mapshader.transforms import reproject_raster\n'), ((1149, 1177), 'xarray.open_rasterio', 'xr.open_rasterio', (['input_file'], {}), '(input_file)\n', (1165, 1177), True, 'import xarray as xr\n'), ((1188, 1208), 'mapshader.transforms.squeeze', 'squeeze', (['arr', '"""band"""'], {}), "(arr, 'band')\n", (1195, 1208), False, 'from mapshader.transforms import squeeze\n'), ((1219, 1236), 'mapshader.transforms.orient_array', 'orient_array', (['arr'], {}), '(arr)\n', (1231, 1236), False, 'from mapshader.transforms import orient_array\n'), ((1247, 1272), 'mapshader.transforms.flip_coords', 'flip_coords', (['arr'], {'dim': '"""y"""'}), "(arr, dim='y')\n", (1258, 1272), False, 'from mapshader.transforms import flip_coords\n'), ((1303, 1335), 'mapshader.transforms.reproject_raster', 'reproject_raster', (['arr'], {'epsg': '(3857)'}), '(arr, epsg=3857)\n', (1319, 1335), False, 'from mapshader.transforms import reproject_raster\n'), ((1827, 1843), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1841, 1843), False, 'from argparse import ArgumentParser\n'), ((1998, 2023), 'os.path.expanduser', 'path.expanduser', (['parsed.i'], {}), '(parsed.i)\n', (2013, 2023), False, 'from os import path\n'), ((2228, 2253), 'os.path.expanduser', 'path.expanduser', (['parsed.o'], {}), '(parsed.o)\n', (2243, 2253), False, 'from os import path\n')]
|
# -*- encoding: utf-8 -*-
def extract_feature_pixel(img, mask_1, mask_0=[], mask_2=[], mask_3=[], mask_4=[], mask_5=[], dim_prof=0):
import numpy as np
    # Reads the image and masks and returns the pixel array as features
n = img.shape[dim_prof]
t1 = img[mask_1].size/n
t0 = img[mask_0].size/n
t2 = img[mask_2].size/n
t3 = img[mask_3].size/n
t4 = img[mask_4].size/n
t5 = img[mask_5].size/n
ones = np.ones((t1,1))
eval1 = img[mask_1].reshape(n,-1).T
atr_slice = np.concatenate((eval1,ones), axis=1)
if mask_0!=[]:
zeros = np.zeros((t0,1))
eval0 = img[mask_0].reshape(n,-1).T
atr0 = np.concatenate((eval0,zeros), axis=1)
atr_slice = np.vstack([atr0,atr_slice])
if mask_2!=[]:
twos = np.ones((t2,1))*2
eval2 = img[mask_2].reshape(n,-1).T
atr2 = np.concatenate((eval2,twos), axis=1)
atr_slice = np.vstack([atr_slice,atr2])
if mask_3!=[]:
threes = np.ones((t3,1))*3
eval3 = img[mask_3].reshape(n,-1).T
atr3 = np.concatenate((eval3,threes), axis=1)
atr_slice = np.vstack([atr_slice,atr3])
if mask_4!=[]:
fours = np.ones((t4,1))*4
eval4 = img[mask_4].reshape(n,-1).T
atr4 = np.concatenate((eval4,fours), axis=1)
atr_slice = np.vstack([atr_slice,atr4])
if mask_5!=[]:
fives = np.ones((t5,1))*5
eval5 = img[mask_5].reshape(n,-1).T
atr5 = np.concatenate((eval5,fives), axis=1)
atr_slice = np.vstack([atr_slice,atr5])
return atr_slice
|
[
"numpy.vstack",
"numpy.zeros",
"numpy.ones",
"numpy.concatenate"
] |
[((451, 467), 'numpy.ones', 'np.ones', (['(t1, 1)'], {}), '((t1, 1))\n', (458, 467), True, 'import numpy as np\n'), ((523, 560), 'numpy.concatenate', 'np.concatenate', (['(eval1, ones)'], {'axis': '(1)'}), '((eval1, ones), axis=1)\n', (537, 560), True, 'import numpy as np\n'), ((596, 613), 'numpy.zeros', 'np.zeros', (['(t0, 1)'], {}), '((t0, 1))\n', (604, 613), True, 'import numpy as np\n'), ((672, 710), 'numpy.concatenate', 'np.concatenate', (['(eval0, zeros)'], {'axis': '(1)'}), '((eval0, zeros), axis=1)\n', (686, 710), True, 'import numpy as np\n'), ((730, 758), 'numpy.vstack', 'np.vstack', (['[atr0, atr_slice]'], {}), '([atr0, atr_slice])\n', (739, 758), True, 'import numpy as np\n'), ((870, 907), 'numpy.concatenate', 'np.concatenate', (['(eval2, twos)'], {'axis': '(1)'}), '((eval2, twos), axis=1)\n', (884, 907), True, 'import numpy as np\n'), ((927, 955), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr2]'], {}), '([atr_slice, atr2])\n', (936, 955), True, 'import numpy as np\n'), ((1069, 1108), 'numpy.concatenate', 'np.concatenate', (['(eval3, threes)'], {'axis': '(1)'}), '((eval3, threes), axis=1)\n', (1083, 1108), True, 'import numpy as np\n'), ((1128, 1156), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr3]'], {}), '([atr_slice, atr3])\n', (1137, 1156), True, 'import numpy as np\n'), ((1269, 1307), 'numpy.concatenate', 'np.concatenate', (['(eval4, fours)'], {'axis': '(1)'}), '((eval4, fours), axis=1)\n', (1283, 1307), True, 'import numpy as np\n'), ((1327, 1355), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr4]'], {}), '([atr_slice, atr4])\n', (1336, 1355), True, 'import numpy as np\n'), ((1468, 1506), 'numpy.concatenate', 'np.concatenate', (['(eval5, fives)'], {'axis': '(1)'}), '((eval5, fives), axis=1)\n', (1482, 1506), True, 'import numpy as np\n'), ((1526, 1554), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr5]'], {}), '([atr_slice, atr5])\n', (1535, 1554), True, 'import numpy as np\n'), ((793, 809), 'numpy.ones', 'np.ones', (['(t2, 1)'], {}), '((t2, 1))\n', (800, 809), True, 'import numpy as np\n'), ((992, 1008), 'numpy.ones', 'np.ones', (['(t3, 1)'], {}), '((t3, 1))\n', (999, 1008), True, 'import numpy as np\n'), ((1192, 1208), 'numpy.ones', 'np.ones', (['(t4, 1)'], {}), '((t4, 1))\n', (1199, 1208), True, 'import numpy as np\n'), ((1391, 1407), 'numpy.ones', 'np.ones', (['(t5, 1)'], {}), '((t5, 1))\n', (1398, 1407), True, 'import numpy as np\n')]
|
from django.urls import include, path, re_path
from . import views as school_views
from django.views.generic import TemplateView
app_name = 'schools'
urlpatterns = [
# if blank show districts(Thissur, palakkad...)
# path('', school_views.states, name='states'),
# if digit, view school information
re_path(r'^(?P<code>\d{5})/',school_views.school_view_kerala, name='school_view_kerala'),
re_path(r'^(?P<code>\d{4}\w{7})/',school_views.school_view, name='school_view'),
# if districts show districts(Thissur, palakkad...) of a state
path('<slug:state>/', school_views.districts, name='districts'),
# if character, show sub districts
path('<slug:state>/<district>/',school_views.sub_districts, name='sub_districts'),
path('<slug:state>/<district>/<sub_district>/',school_views.schools, name='schools'),
]
|
[
"django.urls.re_path",
"django.urls.path"
] |
[((304, 398), 'django.urls.re_path', 're_path', (['"""^(?P<code>\\\\d{5})/"""', 'school_views.school_view_kerala'], {'name': '"""school_view_kerala"""'}), "('^(?P<code>\\\\d{5})/', school_views.school_view_kerala, name=\n 'school_view_kerala')\n", (311, 398), False, 'from django.urls import include, path, re_path\n'), ((396, 482), 'django.urls.re_path', 're_path', (['"""^(?P<code>\\\\d{4}\\\\w{7})/"""', 'school_views.school_view'], {'name': '"""school_view"""'}), "('^(?P<code>\\\\d{4}\\\\w{7})/', school_views.school_view, name=\n 'school_view')\n", (403, 482), False, 'from django.urls import include, path, re_path\n'), ((543, 606), 'django.urls.path', 'path', (['"""<slug:state>/"""', 'school_views.districts'], {'name': '"""districts"""'}), "('<slug:state>/', school_views.districts, name='districts')\n", (547, 606), False, 'from django.urls import include, path, re_path\n'), ((646, 733), 'django.urls.path', 'path', (['"""<slug:state>/<district>/"""', 'school_views.sub_districts'], {'name': '"""sub_districts"""'}), "('<slug:state>/<district>/', school_views.sub_districts, name=\n 'sub_districts')\n", (650, 733), False, 'from django.urls import include, path, re_path\n'), ((730, 820), 'django.urls.path', 'path', (['"""<slug:state>/<district>/<sub_district>/"""', 'school_views.schools'], {'name': '"""schools"""'}), "('<slug:state>/<district>/<sub_district>/', school_views.schools, name=\n 'schools')\n", (734, 820), False, 'from django.urls import include, path, re_path\n')]
|