code (string, 22 – 1.05M chars) | apis (list, 1 – 3.31k entries) | extract_api (string, 75 – 3.25M chars) |
---|---|---|
"""Entry point for TwitOff Flask application."""
from web_app.app import create_app
APP = create_app()
|
[
"web_app.app.create_app"
] |
[((91, 103), 'web_app.app.create_app', 'create_app', ([], {}), '()\n', (101, 103), False, 'from web_app.app import create_app\n')]
|
import pickle
import numpy as np
import pandas as pd
from numpy import linalg as LA
from scipy import stats
from sklearn.metrics import mean_squared_error
import sys
def compute_rmse(target, prediction):
"""Compute rmse between the ground truth and forecasts
Args:
target: a numpy array with ground truth
prediction: a numpy array with forecasted values
Returns: rmse between the ground truth and forecasts
"""
return np.sqrt(mean_squared_error(target, prediction))
def compute_cosine(target, prediction):
"""Compute cosine simialrity between the ground truth and forecasts
Args:
target: a numpy array with ground truth
prediction: a numpy array with forecasted values
Returns: cosine similarity between the ground truth and forecasts
"""
result = np.dot(target, prediction) / (LA.norm(target) * LA.norm(prediction))
return result
def r_squared(y_true, y_pred, y_mean=None):
"""Compute relative R^2 between the ground truth and forecasts
Args:
y_true: a numpy array with ground truth
y_pred: a numpy array with forecasted values
y_mean: optional baseline values; if None, a zero baseline is used
Returns: relative R^2 between the ground truth and forecasts
"""
if y_mean is None:
# NOTE: this evaluates to an all-zero baseline (the np.mean factor has no effect);
# np.ones(...) * np.mean(y_true) may have been intended for a mean baseline
y_mean = np.zeros(y_true.shape[0]) * np.mean(y_true)
rss = np.sum((y_true - y_pred)**2)
tss = np.sum((y_true - y_mean)**2)
rsq = 1 - rss / tss
return rsq
def print_eval_stats(eval_result):
"""Print the mean(se), median(se), 0.25 quantile(se), and 0.75 quantile (se) of the array, where se represents standard deviation
Args:
eval_result: a numpy array with evluation results
"""
print('mean: {:.4f} ({:.4f}) median {:.4f} ({:.4f})'.format(np.mean(eval_result),
stats.sem(eval_result),
np.median(eval_result),
quantile_se(eval_result, p=50)))
print('0.25 quantile: {:.4f} ({:.4f}) 0.75 quantile: {:.4f} ({:.4f})'.format(np.quantile(eval_result, 0.25),
quantile_se(eval_result, p=25),
np.quantile(eval_result, 0.75),
quantile_se(eval_result, p=75)))
def quantile_se(x, p=50):
# compute the standard error for different quantiles
# Source: <NAME>, "Mathematical Statistics". Springer Texts in Statistics, 1999. Page 306: Theorem 5.10
# p: quantile: int between 0-100
# x: data sequence
n = len(x) # number of samples
q = np.percentile(x, p)
density = stats.gaussian_kde(x) # density estimate of x
Fp = density(q).item()
p = p / 100.
sF = np.sqrt(p * (1 - p)) / Fp
se = sF / np.sqrt(n)
return se
def eval_forecast(model_name, rootpath, test_years, month_range, rep=False, num_rep=10):
"""Evalute the forecasts on training and test sets
Args:
model_name: a string indicating the name of a model
rootpath: the path where the forecasts are saved
test_years: a list of years in the test set
month_range: a list of months in the test set
rep: True or False, indicating if the results include repeated runs
num_rep: the number of repetitions
Returns:
result_train: the forecasting performance (temporal/spatial cosine/r2) on training set
result_test: the forecasting performance (temporal/spatial cosine/r2) on test set
"""
target_train = []
target_test = []
prediction_train = []
prediction_test = []
for year in test_years:
if year == 2020:
month_range = range(1, 7)
elif year == 2017:
month_range = range(7, 13)
else:
month_range = range(1, 13)
for month_id in month_range:
result_temp = load_results(rootpath + 'forecast_results/results_{}_{}_{}.pkl'.format(model_name, year, month_id))
target_train.append(result_temp['target_train'])
target_test.append(result_temp['target_test'])
if rep is True:
prediction_train_temp = np.zeros(result_temp['target_train'].shape)
prediction_test_temp = np.zeros(result_temp['target_test'].shape)
for i in range(num_rep):
prediction_train_temp += result_temp['prediction_train'][i]
prediction_test_temp += result_temp['prediction_test'][i]
prediction_train.append(prediction_train_temp / float(num_rep))
prediction_test.append(prediction_test_temp / float(num_rep))
else:
prediction_train.append(result_temp['prediction_train'])
prediction_test.append(result_temp['prediction_test'])
# test set evaluation
prediction_test = np.concatenate(prediction_test, axis=0)
target_test = np.concatenate(target_test, axis=0)
temporal_cos = np.zeros(prediction_test.shape[0])
spatial_cos = np.zeros(prediction_test.shape[1])
temporal_r2 = np.zeros(prediction_test.shape[0])
spatial_r2 = np.zeros(prediction_test.shape[1])
for i in range(prediction_test.shape[0]):
temporal_cos[i] = compute_cosine(target_test[i, :], prediction_test[i, :])
temporal_r2[i] = r_squared(target_test[i, :], prediction_test[i, :])
for i in range(prediction_test.shape[1]):
spatial_cos[i] = compute_cosine(target_test[:, i], prediction_test[:, i])
spatial_r2[i] = r_squared(target_test[:, i], prediction_test[:, i])
result_test = {}
result_test['temporal_cos'] = temporal_cos
result_test['spatial_cos'] = spatial_cos
result_test['temporal_r2'] = temporal_r2
result_test['spatial_r2'] = spatial_r2
# training set evaluation
prediction_train = np.concatenate(prediction_train, axis=0)
target_train = np.concatenate(target_train, axis=0)
temporal_cos_train = np.zeros(prediction_train.shape[0])
spatial_cos_train = np.zeros(prediction_train.shape[1])
temporal_r2_train = np.zeros(prediction_train.shape[0])
spatial_r2_train = np.zeros(prediction_train.shape[1])
for i in range(prediction_train.shape[0]):
temporal_cos_train[i] = compute_cosine(target_train[i, :], prediction_train[i, :])
temporal_r2_train[i] = r_squared(target_train[i, :], prediction_train[i, :])
for i in range(prediction_train.shape[1]):
spatial_cos_train[i] = compute_cosine(target_train[:, i], prediction_train[:, i])
spatial_r2_train[i] = r_squared(target_train[:, i], prediction_train[:, i])
result_train = {}
result_train['temporal_cos'] = temporal_cos_train
result_train['spatial_cos'] = spatial_cos_train
result_train['temporal_r2'] = temporal_r2_train
result_train['spatial_r2'] = spatial_r2_train
return result_train, result_test
|
[
"numpy.quantile",
"numpy.sum",
"numpy.median",
"numpy.zeros",
"scipy.stats.gaussian_kde",
"numpy.percentile",
"numpy.mean",
"numpy.linalg.norm",
"scipy.stats.sem",
"numpy.dot",
"numpy.concatenate",
"numpy.sqrt"
] |
[((1232, 1262), 'numpy.sum', 'np.sum', (['((y_true - y_pred) ** 2)'], {}), '((y_true - y_pred) ** 2)\n', (1238, 1262), True, 'import numpy as np\n'), ((1271, 1301), 'numpy.sum', 'np.sum', (['((y_true - y_mean) ** 2)'], {}), '((y_true - y_mean) ** 2)\n', (1277, 1301), True, 'import numpy as np\n'), ((2691, 2710), 'numpy.percentile', 'np.percentile', (['x', 'p'], {}), '(x, p)\n', (2704, 2710), True, 'import numpy as np\n'), ((2725, 2746), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['x'], {}), '(x)\n', (2743, 2746), False, 'from scipy import stats\n'), ((4908, 4947), 'numpy.concatenate', 'np.concatenate', (['prediction_test'], {'axis': '(0)'}), '(prediction_test, axis=0)\n', (4922, 4947), True, 'import numpy as np\n'), ((4966, 5001), 'numpy.concatenate', 'np.concatenate', (['target_test'], {'axis': '(0)'}), '(target_test, axis=0)\n', (4980, 5001), True, 'import numpy as np\n'), ((5021, 5055), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[0]'], {}), '(prediction_test.shape[0])\n', (5029, 5055), True, 'import numpy as np\n'), ((5074, 5108), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[1]'], {}), '(prediction_test.shape[1])\n', (5082, 5108), True, 'import numpy as np\n'), ((5127, 5161), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[0]'], {}), '(prediction_test.shape[0])\n', (5135, 5161), True, 'import numpy as np\n'), ((5179, 5213), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[1]'], {}), '(prediction_test.shape[1])\n', (5187, 5213), True, 'import numpy as np\n'), ((5878, 5918), 'numpy.concatenate', 'np.concatenate', (['prediction_train'], {'axis': '(0)'}), '(prediction_train, axis=0)\n', (5892, 5918), True, 'import numpy as np\n'), ((5938, 5974), 'numpy.concatenate', 'np.concatenate', (['target_train'], {'axis': '(0)'}), '(target_train, axis=0)\n', (5952, 5974), True, 'import numpy as np\n'), ((6000, 6035), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[0]'], {}), '(prediction_train.shape[0])\n', (6008, 6035), True, 'import numpy as np\n'), ((6060, 6095), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[1]'], {}), '(prediction_train.shape[1])\n', (6068, 6095), True, 'import numpy as np\n'), ((6120, 6155), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[0]'], {}), '(prediction_train.shape[0])\n', (6128, 6155), True, 'import numpy as np\n'), ((6179, 6214), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[1]'], {}), '(prediction_train.shape[1])\n', (6187, 6214), True, 'import numpy as np\n'), ((759, 785), 'numpy.dot', 'np.dot', (['target', 'prediction'], {}), '(target, prediction)\n', (765, 785), True, 'import numpy as np\n'), ((2825, 2845), 'numpy.sqrt', 'np.sqrt', (['(p * (1 - p))'], {}), '(p * (1 - p))\n', (2832, 2845), True, 'import numpy as np\n'), ((2865, 2875), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2872, 2875), True, 'import numpy as np\n'), ((789, 804), 'numpy.linalg.norm', 'LA.norm', (['target'], {}), '(target)\n', (796, 804), True, 'from numpy import linalg as LA\n'), ((807, 826), 'numpy.linalg.norm', 'LA.norm', (['prediction'], {}), '(prediction)\n', (814, 826), True, 'from numpy import linalg as LA\n'), ((1178, 1203), 'numpy.zeros', 'np.zeros', (['y_true.shape[0]'], {}), '(y_true.shape[0])\n', (1186, 1203), True, 'import numpy as np\n'), ((1206, 1221), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (1213, 1221), True, 'import numpy as np\n'), ((1646, 1666), 'numpy.mean', 'np.mean', (['eval_result'], {}), '(eval_result)\n', (1653, 1666), True, 'import numpy as np\n'), ((1732, 1754), 'scipy.stats.sem', 'stats.sem', 
(['eval_result'], {}), '(eval_result)\n', (1741, 1754), False, 'from scipy import stats\n'), ((1820, 1842), 'numpy.median', 'np.median', (['eval_result'], {}), '(eval_result)\n', (1829, 1842), True, 'import numpy as np\n'), ((2022, 2052), 'numpy.quantile', 'np.quantile', (['eval_result', '(0.25)'], {}), '(eval_result, 0.25)\n', (2033, 2052), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.quantile', 'np.quantile', (['eval_result', '(0.75)'], {}), '(eval_result, 0.75)\n', (2259, 2278), True, 'import numpy as np\n'), ((4215, 4258), 'numpy.zeros', 'np.zeros', (["result_temp['target_train'].shape"], {}), "(result_temp['target_train'].shape)\n", (4223, 4258), True, 'import numpy as np\n'), ((4298, 4340), 'numpy.zeros', 'np.zeros', (["result_temp['target_test'].shape"], {}), "(result_temp['target_test'].shape)\n", (4306, 4340), True, 'import numpy as np\n')]
|
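The row above defines the evaluation metrics but never exercises them. As a complement, here is an illustrative, self-contained sketch (not part of the dataset row) of the quantile standard-error formula that `quantile_se` implements — `sqrt(p*(1-p)) / (f(q) * sqrt(n))` with the density `f` estimated by a Gaussian KDE — using made-up toy data.

```python
# Illustrative sketch of the quantile standard-error estimate used in quantile_se().
# Toy data only; the KDE bandwidth will slightly affect the result.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=1000)          # hypothetical evaluation results

p = 50                             # percentile in [0, 100]
q = np.percentile(x, p)            # sample median
f_q = stats.gaussian_kde(x)(q).item()   # density estimate at the quantile
se = np.sqrt((p / 100) * (1 - p / 100)) / f_q / np.sqrt(len(x))
print(f"median ~ {q:.3f}, estimated standard error ~ {se:.3f}")
```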
"""
Description: Test the default which validates core and extensions
"""
__authors__ = "<NAME>", "<NAME>"
from stac_validator import stac_validator
def test_default_v070():
stac_file = "https://radarstac.s3.amazonaws.com/stac/catalog.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/catalog.json",
"asset_type": "CATALOG",
"validation_method": "default",
"schema": ["https://cdn.staclint.com/v0.7.0/catalog.json"],
"valid_stac": True,
}
]
def test_default_item_local_v080():
stac_file = "tests/test_data/v080/items/sample-full.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "0.8.0",
"path": "tests/test_data/v080/items/sample-full.json",
"schema": [
"https://cdn.staclint.com/v0.8.0/extension/eo.json",
"https://cdn.staclint.com/v0.8.0/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_v090():
stac = stac_validator.StacValidate("tests/test_data/v090/items/good_item_v090.json")
stac.run()
print(stac.message)
assert stac.message == [
{
"version": "0.9.0",
"path": "tests/test_data/v090/items/good_item_v090.json",
"schema": [
"https://cdn.staclint.com/v0.9.0/extension/eo.json",
"https://cdn.staclint.com/v0.9.0/extension/view.json",
"https://cdn.staclint.com/v0.9.0/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_v1beta1():
stac_file = "tests/test_data/1beta1/sentinel2.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1beta1/sentinel2.json",
"asset_type": "COLLECTION",
"version": "1.0.0-beta.1",
"validation_method": "default",
"schema": ["https://cdn.staclint.com/v1.0.0-beta.1/collection.json"],
"valid_stac": True,
}
]
def test_default_proj_v1b2():
stac_file = "https://earth-search.aws.element84.com/v0/collections/sentinel-s2-l1c/items/S2A_51SXT_20210415_0_L1C"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "1.0.0-beta.2",
"path": "https://earth-search.aws.element84.com/v0/collections/sentinel-s2-l1c/items/S2A_51SXT_20210415_0_L1C",
"schema": [
"https://cdn.staclint.com/v1.0.0-beta.1/extension/eo.json",
"https://cdn.staclint.com/v1.0.0-beta.1/extension/view.json",
"https://cdn.staclint.com/v1.0.0-beta.1/extension/projection.json",
"https://schemas.stacspec.org/v1.0.0-beta.2/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_simple_v1rc2():
stac_file = "tests/test_data/1rc2/simple-item.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1rc2/simple-item.json",
"asset_type": "ITEM",
"version": "1.0.0-rc.2",
"validation_method": "default",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json"
],
"valid_stac": True,
}
]
def test_default_extended_v1rc2():
stac_file = "tests/test_data/1rc2/extended-item.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/extended-item.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
"https://stac-extensions.github.io/view/v1.0.0/schema.json",
"https://stac-extensions.github.io/remote-data/v1.0.0/schema.json",
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_catalog_v1rc2():
stac_file = "tests/test_data/1rc2/catalog.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1rc2/catalog.json",
"asset_type": "CATALOG",
"version": "1.0.0-rc.2",
"validation_method": "default",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/catalog-spec/json-schema/catalog.json"
],
"valid_stac": True,
}
]
|
[
"stac_validator.stac_validator.StacValidate"
] |
[((260, 298), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (287, 298), False, 'from stac_validator import stac_validator\n'), ((773, 811), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (800, 811), False, 'from stac_validator import stac_validator\n'), ((1298, 1375), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['"""tests/test_data/v090/items/good_item_v090.json"""'], {}), "('tests/test_data/v090/items/good_item_v090.json')\n", (1325, 1375), False, 'from stac_validator import stac_validator\n'), ((2019, 2057), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (2046, 2057), False, 'from stac_validator import stac_validator\n'), ((2588, 2626), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (2615, 2626), False, 'from stac_validator import stac_validator\n'), ((3443, 3481), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (3470, 3481), False, 'from stac_validator import stac_validator\n'), ((3996, 4034), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (4023, 4034), False, 'from stac_validator import stac_validator\n'), ((4947, 4985), 'stac_validator.stac_validator.StacValidate', 'stac_validator.StacValidate', (['stac_file'], {}), '(stac_file)\n', (4974, 4985), False, 'from stac_validator import stac_validator\n')]
|
from django.db import models
from core.models import BaseModel
from django.utils.translation import gettext as _
# Create your models here.
class GalleryImage(BaseModel):
name = models.CharField(_("Picture Name"), max_length=50)
picture = models.ImageField(_("Image"), upload_to="img/gallery")
def __str__(self):
return self.name
class Meta:
db_table = ''
managed = True
verbose_name = 'Gallery Image'
verbose_name_plural = 'Gallery Images'
|
[
"django.utils.translation.gettext"
] |
[((203, 220), 'django.utils.translation.gettext', '_', (['"""Picture Name"""'], {}), "('Picture Name')\n", (204, 220), True, 'from django.utils.translation import gettext as _\n'), ((269, 279), 'django.utils.translation.gettext', '_', (['"""Image"""'], {}), "('Image')\n", (270, 279), True, 'from django.utils.translation import gettext as _\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 11:19:57 2018
@author: rwilson
"""
import pygmsh
import meshio
import numpy as np
import pickle
class utilities():
'''A collection of functions for interacting with the mesh object
'''
def meshOjfromDisk(meshObjectPath='cly.Mesh'):
'''Read the entire mesh object from disk.
Parameters
----------
meshObjectPath : str (default='cly.Mesh')
'''
with open(meshObjectPath, 'rb') as clyMesh_file:
return pickle.load(clyMesh_file)
class mesher():
'''Mesh generator class using pygmsh to gmsh code.
Parameters
----------
mesh_param : dict
Expected parameters defining the mesh, char_len, height, radius
cell_data : dict
Contains line, tetra, triangle, vertex 'gmsh:physical' and
'gmsh:geometrical'.
cells : dict
Contains line, tetra, triangle, vertex of the point indices as defined in
``points``.
points : array(float)
Matrix of xyz coords for each point in the mesh domain
Notes
-----
Understanding the mesh structure
Points are a list of each point or vertex in x,y,z positions.
cell_data['tetra']['gmsh:physical'] : the physical values of each tetra
* cell['tetra'] : list of lists of each tetrahedron's vertex indices referencing
the coords inside the points. [points[i1],
points[i2],
points[i3],
points[i4]]
'''
def __init__(self, mesh_param):
self.mesh_param = mesh_param
self.cell_data = None
self.points = None
self.cells = None
self.cell_cent = None
def meshIt(self):
'''Produces the mesh.
'''
self._cylinderMesh()
self._cellCent()
def _cylinderMesh(self):
''' Produce a cylindrical mesh
'''
# The geometry object
geom = pygmsh.opencascade.Geometry()
# Positions
btm_face = [0.0, 0.0, 0.0]
axis = [0.0, 0.0, self.mesh_param['height']]
# create the cylinder with open cascade
geom.add_cylinder(btm_face, axis, self.mesh_param['radius'],
char_length=self.mesh_param['char_len']
)
# Make the mesh
self.points, self.cells, _, self.cell_data, _ = pygmsh.generate_mesh(geom)
def _cellCent(self):
''' Calculate the centre of each tetra.
'''
# The verticies in cart coords
tetra_verts = [ np.array([self.points[vert[0]], self.points[vert[1]],
self.points[vert[2]], self.points[vert[3]]])
for vert in self.cells['tetra']]
# The centre of tetra in cart coords
self.cell_cent = [np.array([vert[:,0].sum()/4, vert[:,1].sum()/4, vert[:,2].sum()/4])
for vert in tetra_verts]
def saveMesh(self, name):
'''Save the mesh to file.
Parameters
----------
name : str
Name of the mesh file saved to the current directory.
'''
mesh = meshio.Mesh(self.points, self.cells, cell_data=self.cell_data)
meshio.write('%s.vtu' % name, mesh)
# meshio.write('%s.vtu' % name, self.points, self.cells, cell_data=self.cell_data)
# meshio.write('%s.msh4' % name, self.points, self.cells, cell_data=self.cell_data)
# meshio.gmsh_io.write('%s.msh' % name, self.points, self.cells, cell_data=self.cell_data)
def setCellsVal(self, cell_values):
'''Set each cell physical value.
Parameters
----------
cell_values : array/list
physical values of each tetra cell within the mesh domain in order
corresponding to ``points``.
'''
self.cell_data['tetra']['gmsh:physical'] = cell_values
def meshOjtoDisk(self):
'''Save the entire mesh object to disk
'''
with open('cly.Mesh', 'wb') as clyMesh_file:
pickle.dump(self, clyMesh_file)
def meshOjfromDisk(self):
'''Read the entire mesh object from disk
TODO
----
Should likely deprecate this function and simply use the one stored in the utilities class
'''
with open('cly.Mesh', 'rb') as clyMesh_file:
return pickle.load(clyMesh_file)
|
[
"pickle.dump",
"pygmsh.generate_mesh",
"meshio.write",
"pickle.load",
"numpy.array",
"pygmsh.opencascade.Geometry",
"meshio.Mesh"
] |
[((2184, 2213), 'pygmsh.opencascade.Geometry', 'pygmsh.opencascade.Geometry', ([], {}), '()\n', (2211, 2213), False, 'import pygmsh\n'), ((2589, 2615), 'pygmsh.generate_mesh', 'pygmsh.generate_mesh', (['geom'], {}), '(geom)\n', (2609, 2615), False, 'import pygmsh\n'), ((3385, 3447), 'meshio.Mesh', 'meshio.Mesh', (['self.points', 'self.cells'], {'cell_data': 'self.cell_data'}), '(self.points, self.cells, cell_data=self.cell_data)\n', (3396, 3447), False, 'import meshio\n'), ((3457, 3492), 'meshio.write', 'meshio.write', (["('%s.vtu' % name)", 'mesh'], {}), "('%s.vtu' % name, mesh)\n", (3469, 3492), False, 'import meshio\n'), ((549, 574), 'pickle.load', 'pickle.load', (['clyMesh_file'], {}), '(clyMesh_file)\n', (560, 574), False, 'import pickle\n'), ((2768, 2870), 'numpy.array', 'np.array', (['[self.points[vert[0]], self.points[vert[1]], self.points[vert[2]], self.\n points[vert[3]]]'], {}), '([self.points[vert[0]], self.points[vert[1]], self.points[vert[2]],\n self.points[vert[3]]])\n', (2776, 2870), True, 'import numpy as np\n'), ((4276, 4307), 'pickle.dump', 'pickle.dump', (['self', 'clyMesh_file'], {}), '(self, clyMesh_file)\n', (4287, 4307), False, 'import pickle\n'), ((4595, 4620), 'pickle.load', 'pickle.load', (['clyMesh_file'], {}), '(clyMesh_file)\n', (4606, 4620), False, 'import pickle\n')]
|
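The Notes section in the row above describes how `cells['tetra']` indexes into `points`; the small NumPy-only sketch below mirrors the centroid computation that `_cellCent` performs on that layout. The arrays are made up for illustration and do not come from gmsh.

```python
# Tetrahedron centroids from a points array and a (n_tetra, 4) index array,
# mirroring the points / cells['tetra'] layout described in the Notes above.
import numpy as np

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0],
                   [1.0, 1.0, 1.0]])
tetra = np.array([[0, 1, 2, 3],           # each row holds four indices into `points`
                  [1, 2, 3, 4]])

cell_cent = points[tetra].mean(axis=1)     # mean of the four vertex coordinates
print(cell_cent)
```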
"""Test function for who_likes_it module."""
import pytest
TEST_DATA = [([], "no one likes this"),
(["Peter"], "Peter likes this"),
(["Jacob", "Alex"], "Jacob and Alex like this"),
(["Max", "John", "Mark"], "Max, John and Mark like this"),
(["Alex", "Jacob", "Mark", "Max"], "Alex, Jacob and 2 others like this"),
(['Ryan', 'Jonathon', 'Alexandra', 'Jeffery', 'Elizabeth', 'Gina',
'Kristina', 'Hannah', 'Crystal', 'Patrick', 'Brandon', 'Daniel',
'Christian'], "Ryan, Jonathon and 11 others like this"),
(['Ernest', 'Stephanie'], "Ernest and Stephanie like this"),
(['Angelica', 'Karen', 'Kevin', 'William', 'Michaela', 'Kelly',
'Ashley', 'Maria', 'Edward', 'Gregory', 'Sarah', 'Robert',
'Sergio', 'Marvin', 'Nicole', 'Jeremy', 'Charles', 'Sandra',
'Cindy', 'Thomas', 'Dan', 'Karla', 'Travis', 'Pamela',
'Kimberly', 'Robert', 'James', 'David', 'Geoffrey', 'Patrick',
'Nicole', 'Mitchell', 'Angela', 'Kayla', 'Madeline', 'Joann',
'Maria', 'Ryan', 'Michelle', 'William', 'Johnny', 'Michael',
'Patricia'], "Angelica, Karen and 41 others like this"),
(['Lisa', 'Katrina', 'Kelly', 'Kyle', 'Catherine', 'Kimberly',
'Mason', 'Diana', 'Samantha', 'Kimberly', 'Sherry', 'Joseph',
'Allison', 'Mark', 'Virginia', 'Christopher', 'Manuel',
'Michelle', 'Adam', 'Brenda', 'Bradley', 'Marissa', 'Carmen',
'Carol', 'Kathleen', 'Brandon', 'Richard', 'Tara', 'Bonnie',
'Richard', 'Bianca', 'Donald', 'Jonathan', 'Amanda', 'Jennifer',
'Veronica', 'Alison', 'Diane', 'Olivia', 'Joe', 'Janet',
'Stephanie', 'Scott', 'Dale', 'Natasha', 'Stephen', 'Laura',
'Brian', 'Lynn', 'Kurt', 'Julia', 'Janet', 'Cory', 'Cody',
'Mark', 'Elizabeth', 'Leslie', 'Bruce', 'Cindy', 'William',
'Devin', 'Michael', 'Paul', 'Lindsey', 'Julie', 'Michelle',
'Carla', 'Ian', 'Dennis', 'Lindsay', 'Rose', 'Emily', 'Jessica',
'Jerry', 'Riley', 'Jeffery', 'Steven', 'Alisha', 'Mark',
'Joseph', 'Andrew', 'Joshua', 'Nathan'], "Lisa, Katrina and 81 others like this")]
@pytest.mark.parametrize("string, result", TEST_DATA)
def test_likes(string, result):
"""Test for likes function."""
from who_likes_it import likes
assert likes(string) == result
|
[
"pytest.mark.parametrize",
"who_likes_it.likes"
] |
[((2356, 2408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""string, result"""', 'TEST_DATA'], {}), "('string, result', TEST_DATA)\n", (2379, 2408), False, 'import pytest\n'), ((2522, 2535), 'who_likes_it.likes', 'likes', (['string'], {}), '(string)\n', (2527, 2535), False, 'from who_likes_it import likes\n')]
|
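The test row above only ships the expected strings; a minimal implementation that satisfies those cases could look like the sketch below. This is hedged: the actual `who_likes_it.likes` under test may differ in details, so treat it as an assumed reference rather than the tested module.

```python
# Minimal likes() consistent with TEST_DATA above (not the tested module itself).
def likes(names):
    if not names:
        return "no one likes this"
    if len(names) == 1:
        return f"{names[0]} likes this"
    if len(names) == 2:
        return f"{names[0]} and {names[1]} like this"
    if len(names) == 3:
        return f"{names[0]}, {names[1]} and {names[2]} like this"
    return f"{names[0]}, {names[1]} and {len(names) - 2} others like this"

assert likes([]) == "no one likes this"
assert likes(["Max", "John", "Mark"]) == "Max, John and Mark like this"
```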
import itertools
from typing import List, DefaultDict, Tuple
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# from sklearn.metrics import recall_score, accuracy_score, confusion_matrix
from sklearn.model_selection import KFold
from .categorical_encoders import LeaveOneOutEncoder
class LOOGridSearchCV:
"""
Specially prepared class to do grid search with cross-validation on our loo encoded
DataFrame.
Scores should be approximately ok, although i have no proof for that :)
"""
def __init__(self,
train_df: pd.DataFrame,
model,
params_grid: DefaultDict,
columns_to_encode: List,
columns_to_drop_from_training: List,
Xs_train: List[pd.DataFrame] = None,
ys_train: List[pd.DataFrame] = None,
Xs_val: List[pd.DataFrame] = None,
ys_val: List[pd.DataFrame] = None,
ohe_emails: bool = True,
mean: int = 1,
std: int = 0.05,
n_folds: int = 5,
encoded_df: pd.DataFrame = pd.DataFrame(),
) -> None:
"""
:param train_df: training DataFrame (will be split into train/validation sets n_folds times)
:param model: model to train
:param params_grid: param_grid to search
:param columns_to_encode: categorical columns, which you want to encode using loo
:param columns_to_drop_from_training: columns to drop from training phase
:param ohe_emails: if set to True, performs OHE on emails column
:param Xs_train:
:param mean: mean to regularization part of the encoding
:param std: std to regularization part of the encoding
:param n_folds: n_folds to validate
:param encoded_df: if the encoding was already done before, pass the encoded DataFrame here
"""
self.processed_train_df = (train_df.copy(deep=True)
.reset_index()
.drop(columns='name'))
self.model = model
self.params_grid = params_grid
self.columns_to_encode = columns_to_encode
self.columns_to_drop_from_training = columns_to_drop_from_training
self.ohe_emails = ohe_emails
self.mean = mean
self.std = std
self.n_folds = n_folds
if not Xs_train:
self.Xs_train, self.ys_train, self.Xs_val, self.ys_val = ([] for i in range(4))
else:
self.Xs_train = Xs_train
self.ys_train = ys_train
self.Xs_val = Xs_val
self.ys_val = ys_val
self.encoded_df_ = encoded_df
# self.best_accuracy_estimator = None
# self.best_recall_estimator = None
self.best_roc_auc_estimator = None
def _ohe_emails(self) -> pd.DataFrame:
"""
internal method for one hot encoding emails column
"""
email_ohe_names = {0: '0_emails',
1: '1_email',
2: '2_emails',
3: '3_emails',
4: '4_emails',
5: '5_emails'}
self.processed_train_df = (pd.concat([self.processed_train_df, pd.get_dummies(
self.processed_train_df['emails'])], axis=1)
.rename(columns=email_ohe_names))
self.columns_to_drop_from_training.append('emails')
return self.processed_train_df
def _prepare_train_val_dfs(self):
"""
Internal method that prepares the train and validation DataFrames for the given n_folds,
making grid search and cross-validation much faster: for each fold the encoded_df and the
train/validation DataFrames only need to be computed once.
"""
if self.ohe_emails:
X = self._ohe_emails()
else:
X = self.processed_train_df
if 'emails' in X.columns:
X['emails'] = X['emails'].astype(int)
y = self.processed_train_df[['target']]
X.drop(columns=self.columns_to_drop_from_training, inplace=True)
"""
to have each sample exactly once in validation set
"""
kf = KFold(n_splits=self.n_folds, shuffle=False, random_state=None)
splits = kf.split(X)
dfs_to_mean = []
for train_index, val_index in splits:
X_train, y_train = X.iloc[train_index], y.iloc[train_index]
X_val, y_val = X.iloc[val_index], y.iloc[val_index]
X_val.drop(columns=['target'], inplace=True)
enc = LeaveOneOutEncoder(train_df=X_train,
test_df=X_val,
columns_to_encode=self.columns_to_encode,
target_column='target',
random_state=42,
mean=self.mean,
std=self.std)
X_train, X_val = enc.fit()
encoded_cols = [col for col in X_train.columns if 'encoded_' in col]
dfs_to_mean.append(X_train[encoded_cols])
train_to_drop = self.columns_to_encode.copy()
train_to_drop.extend(['target'])
X_train.drop(columns=train_to_drop, inplace=True)
test_to_drop = self.columns_to_encode.copy()
X_val.drop(columns=test_to_drop, inplace=True)
self.Xs_train.append(X_train)
self.ys_train.append(y_train)
self.Xs_val.append(X_val)
self.ys_val.append(y_val)
"""
Compute the mean of the per-fold encodings, excluding each fold's own validation part,
which keeps the encoding consistent with the cross-validation setup.
"""
for df in dfs_to_mean:
zeros = [0 for col in df.columns]
for index in range(len(self.processed_train_df)):
if index not in df.index:
df.loc[index, :] = zeros
df.sort_index(inplace=True)
mean_df = dfs_to_mean[0].copy(deep=True)
mean_df = mean_df * 0
for num in range(self.n_folds):
mean_df = mean_df + dfs_to_mean[num]
self.encoded_df_ = mean_df.divide(self.n_folds - 1)
def best_roc_auc_estimator_(self, best_roc_auc_estimator):
self.best_roc_auc_estimator = best_roc_auc_estimator
"""
def best_accuracy_estimator_(self, best_accuracy_estimator):
self.best_accuracy_estimator = best_accuracy_estimator
def best_recall_estimator_(self, best_recall_estimator):
self.best_recall_estimator = best_recall_estimator
"""
def grid_search(self) -> Tuple[List, List, List, List]:
"""
Performs grid search with cross-validation.
:return: list with the mean ROC AUC score for each parameter combination
"""
if self.encoded_df_.empty:
self._prepare_train_val_dfs()
models_roc_auc_scores = []
# models_accuracies, models_recalls, models_parameters, models_cms = ([] for i in range(4))
for p in itertools.product(*self.params_grid.values()):
model_params = self.params_grid.copy()
for counter, key in enumerate(model_params.keys()):
model_params[key] = p[counter]
# models_parameters.append(model_params.items())
clf = clone(self.model)
clf = clf.set_params(**model_params)
cv_roc_auc_scores = []
# cv_accuracies, cv_recalls, cv_cms = ([] for i in range(3))
"""
fit and predict on every fold, scoring each fold by ROC AUC
(accuracy, recall and confusion-matrix scoring is kept commented out)
"""
for index in range(self.n_folds):
clf.fit(self.Xs_train[index], self.ys_train[index])
predictions = clf.predict(self.Xs_val[index])
cv_roc_auc_scores.append(roc_auc_score(self.ys_val[index], predictions))
# cv_accuracies.append(accuracy_score(self.ys_val[index], predictions))
# cv_recalls.append(recall_score(self.ys_val[index], predictions))
# cv_cms.append(confusion_matrix(self.ys_val[index], predictions))
"""
final evaluation: the score of each parameter combination is the mean over all folds
(for the commented-out confusion matrices the mean can be non-integer; treat it as
informative guidance for model selection rather than an exact count)
"""
models_roc_auc_scores.append(np.mean(cv_roc_auc_scores))
# models_accuracies.append(np.mean(cv_accuracies))
# models_recalls.append(np.mean(cv_recalls))
# models_cms.append(np.mean(cv_cms, axis=0))
# if max(models_accuracies) == np.mean(cv_accuracies):
# self.best_accuracy_estimator_(clf)
# if max(models_recalls) == np.mean(cv_recalls):
# self.best_recall_estimator_(clf)
if max(models_roc_auc_scores) == np.mean(cv_roc_auc_scores):
self.best_roc_auc_estimator_(clf)
return models_roc_auc_scores
# return models_accuracies, models_parameters, models_recalls, models_cms
def processed_train(self):
"""
:return: processed train DataFrame with added encoded columns
"""
train = self.processed_train_df.copy(deep=True)
encoded = self.encoded_df_.copy(deep=True)
train = train.drop(columns=self.columns_to_encode+['target'])
processed_train = pd.concat([train, encoded], axis=1)
return processed_train
|
[
"pandas.DataFrame",
"pandas.get_dummies",
"sklearn.model_selection.KFold",
"sklearn.metrics.roc_auc_score",
"numpy.mean",
"pandas.concat",
"sklearn.base.clone"
] |
[((1195, 1209), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1207, 1209), True, 'import pandas as pd\n'), ((4373, 4435), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.n_folds', 'shuffle': '(False)', 'random_state': 'None'}), '(n_splits=self.n_folds, shuffle=False, random_state=None)\n', (4378, 4435), False, 'from sklearn.model_selection import KFold\n'), ((9819, 9854), 'pandas.concat', 'pd.concat', (['[train, encoded]'], {'axis': '(1)'}), '([train, encoded], axis=1)\n', (9828, 9854), True, 'import pandas as pd\n'), ((7623, 7640), 'sklearn.base.clone', 'clone', (['self.model'], {}), '(self.model)\n', (7628, 7640), False, 'from sklearn.base import clone\n'), ((8807, 8833), 'numpy.mean', 'np.mean', (['cv_roc_auc_scores'], {}), '(cv_roc_auc_scores)\n', (8814, 8833), True, 'import numpy as np\n'), ((9291, 9317), 'numpy.mean', 'np.mean', (['cv_roc_auc_scores'], {}), '(cv_roc_auc_scores)\n', (9298, 9317), True, 'import numpy as np\n'), ((8172, 8218), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['self.ys_val[index]', 'predictions'], {}), '(self.ys_val[index], predictions)\n', (8185, 8218), False, 'from sklearn.metrics import roc_auc_score\n'), ((3345, 3394), 'pandas.get_dummies', 'pd.get_dummies', (["self.processed_train_df['emails']"], {}), "(self.processed_train_df['emails'])\n", (3359, 3394), True, 'import pandas as pd\n')]
|
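The `grid_search` method in the row above expands the parameter grid with `itertools.product` over the grid's values and re-keys each tuple back to parameter names. The sketch below isolates just that expansion with a hypothetical grid, since running the full pipeline would require the encoder and data.

```python
# Parameter-grid expansion as used in grid_search() above (hypothetical grid values).
import itertools

params_grid = {"n_estimators": [100, 200], "max_depth": [3, 5]}

for values in itertools.product(*params_grid.values()):
    model_params = dict(zip(params_grid.keys(), values))
    # each dict is one candidate, applied via clone(model).set_params(**model_params)
    print(model_params)
```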
import sdi_utils.gensolution as gs
import subprocess
import io
import logging
import os
import string
import secrets
import base64
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
api.queue.append(msg.body)
class config:
## Meta data
config_params = dict()
version = "0.0.1"
tags = {'': ''}
operator_name = 'create_users'
operator_description = "Create Users Credentials"
operator_description_long = "Create User Credentials"
num_users = 90
config_params['num_users'] = {'title': 'Number of Users', 'description': 'Number of users', 'type': 'integer'}
root_name = 'TED_'
config_params['root_name'] = {'title': 'Root name', 'description': 'Root name for numbering', 'type': 'string'}
pwd_length = 6
config_params['pwd_length'] = {'title': 'Password Length', 'description': 'Password Length', 'type': 'integer'}
logger = logging.getLogger(name='distribute_users')
# set logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
log_stream = io.StringIO()
sh = logging.StreamHandler(stream=log_stream)
#sh.setFormatter(logging.Formatter('%(asctime)s ; %(levelname)s ; %(name)s ; %(message)s', datefmt='%H:%M:%S'))
api.logger.addHandler(sh)
def generate():
# base set for pwd generation
baseset = string.ascii_letters + string.digits
baseset = [i for i in baseset if not i in 'Il0O']
# User and pwd
idx_users_pwd = [[str(i-1),api.config.root_name + str(i), ''.join(secrets.choice(baseset) for n in range(api.config.pwd_length))]
for i in range(1, api.config.num_users + 1)]
#tenant = 'default'
#idx_users_pwd_base64 = [[u[0],u[1],u[2],str(base64.b64encode('{}\\{}:{}'.format(tenant,u[0],u[1]).encode('ascii')))[2:-1]] for u in users_pwd]
header = 'index,user,password\n'
users_csv_str = header + '\n'.join([','.join(elem) for elem in idx_users_pwd])
attributes = {"file": {"connection": {"configurationType": "Connection Management", "connectionID": ""},"path": "", "size": 0}}
msg = api.Message(attributes=attributes,body=users_csv_str)
api.send(outports[0]['name'],msg)
outports = [{'name': 'users', 'type': 'message.file',"description":"new user"}]
api.add_generator(generate)
def test_operator() :
api.config.num_users = 90
api.config.root_name = 'ted_'
api.config.pwd_length = 6
filename = 'DAT262_2.csv'
generate()
with open(os.path.join("/Users/Shared/data/registration",filename), 'w') as file:
for m in api.queue :
file.write(m)
|
[
"io.StringIO",
"logging.basicConfig",
"secrets.choice",
"logging.StreamHandler",
"os.path.join",
"logging.getLogger"
] |
[((1282, 1379), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (1301, 1379), False, 'import logging\n'), ((1388, 1401), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1399, 1401), False, 'import io\n'), ((1407, 1447), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'log_stream'}), '(stream=log_stream)\n', (1428, 1447), False, 'import logging\n'), ((1224, 1266), 'logging.getLogger', 'logging.getLogger', ([], {'name': '"""distribute_users"""'}), "(name='distribute_users')\n", (1241, 1266), False, 'import logging\n'), ((2781, 2838), 'os.path.join', 'os.path.join', (['"""/Users/Shared/data/registration"""', 'filename'], {}), "('/Users/Shared/data/registration', filename)\n", (2793, 2838), False, 'import os\n'), ((1835, 1858), 'secrets.choice', 'secrets.choice', (['baseset'], {}), '(baseset)\n', (1849, 1858), False, 'import secrets\n')]
|
import ast
import inspect
import textwrap
from .base import TohuBaseGenerator
from .ipython_support import get_ast_node_for_classes_defined_interactively_in_ipython
__all__ = ["Placeholder", "placeholder", "foreach"]
class Placeholder:
def __init__(self, name):
self.name = name
placeholder = Placeholder("<generic>")
def get_ast_node_for_classes_defined_in_source_files(cls):
orig_cls_source = textwrap.dedent(inspect.getsource(cls))
orig_cls_ast_node = ast.parse(orig_cls_source)
return orig_cls_ast_node
def get_cls_compiled_ast_node(cls):
try:
orig_cls_ast_node = get_ast_node_for_classes_defined_in_source_files(cls)
except TypeError as exc:
if exc.args[0] == "<module '__main__'> is a built-in class":
orig_cls_ast_node = get_ast_node_for_classes_defined_interactively_in_ipython(cls)
else:
# unexpected error; re-raise the exception
raise
orig_cls_compiled = compile(orig_cls_ast_node, "<string>", "exec")
return orig_cls_compiled
def reevaluate_class_definition(
orig_cls_compiled_ast_node, *, orig_cls_name, global_vars, local_vars, **custom_var_defs
):
my_global_vars = global_vars.copy()
my_global_vars.update(custom_var_defs)
my_global_vars.update(local_vars)
my_local_vars = {}
exec(orig_cls_compiled_ast_node, my_global_vars, my_local_vars)
# Sanity check to ensure the code only evaluated the expected class definition
assert list(my_local_vars.keys()) == [orig_cls_name], "Unexpected object(s) found during code evaluation."
reevaluated_cls = my_local_vars[orig_cls_name]
return reevaluated_cls
def restore_globals(global_vars, names, clashes):
for name in names:
if name in clashes:
# restore items that were previously defined
global_vars[name] = clashes[name]
else:
# remove items which didn't exist before
global_vars.pop(name)
def foreach(**var_defs):
new_names = var_defs.keys()
parent_frame = inspect.currentframe().f_back
global_vars = parent_frame.f_globals
local_vars = parent_frame.f_locals
clashes = {name: global_vars[name] for name in new_names if name in global_vars}
global_vars.update(var_defs)
def make_foreach_closure(cls):
if not inspect.isclass(cls):
raise TypeError(
f"Foreach decorator must be applied to a tohu generator class, not an object of type {type(cls)}."
)
if not issubclass(cls, TohuBaseGenerator):
raise TypeError("Decorated class must be a subclass of TohuBaseGenerator.")
orig_cls_compiled_ast_node = get_cls_compiled_ast_node(cls)
orig_cls_name = cls.__name__
class ForeachWrapper:
def __init__(self, *args, **kwargs):
self.init_args = args
self.init_kwargs = kwargs
def foreach(self, **custom_var_defs):
custom_var_names = list(custom_var_defs.keys())
missing_params = list(set(new_names).difference(custom_var_names))
extra_params = list(set(custom_var_names).difference(new_names))
if missing_params:
raise ValueError(f"Missing parameter(s): {', '.join(missing_params)!r}")
if extra_params:
raise ValueError(f"Extra parameter(s) provided: {', '.join(extra_params)!r}")
# Re-evaluate the class definition, including the previously missing
# variable values to replace the placeholders.
rewritten_cls = reevaluate_class_definition(
orig_cls_compiled_ast_node,
orig_cls_name=orig_cls_name,
global_vars=global_vars,
local_vars=local_vars,
**custom_var_defs,
)
return rewritten_cls(*self.init_args, **self.init_kwargs)
restore_globals(global_vars, new_names, clashes)
return ForeachWrapper
return make_foreach_closure
|
[
"inspect.isclass",
"ast.parse",
"inspect.getsource",
"inspect.currentframe"
] |
[((482, 508), 'ast.parse', 'ast.parse', (['orig_cls_source'], {}), '(orig_cls_source)\n', (491, 508), False, 'import ast\n'), ((434, 456), 'inspect.getsource', 'inspect.getsource', (['cls'], {}), '(cls)\n', (451, 456), False, 'import inspect\n'), ((2052, 2074), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (2072, 2074), False, 'import inspect\n'), ((2333, 2353), 'inspect.isclass', 'inspect.isclass', (['cls'], {}), '(cls)\n', (2348, 2353), False, 'import inspect\n')]
|
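The module in the row above works by compiling the decorated class's source once and re-executing it with extra names injected into the globals, so placeholder variables in the class body resolve to concrete values at `foreach(...)` time. Below is a stripped-down sketch of that mechanism with a made-up class and placeholder name; the real code additionally handles IPython-defined classes and restores clashing globals.

```python
# Minimal sketch of re-evaluating a class definition with injected globals.
import ast

source = """
class Greeter:
    greeting = GREETING            # free name, filled in at exec time
    def greet(self):
        return f"{self.greeting}, world"
"""

compiled = compile(ast.parse(source), "<string>", "exec")

local_vars = {}
exec(compiled, {"GREETING": "hello"}, local_vars)   # inject the missing name
Greeter = local_vars["Greeter"]
print(Greeter().greet())                            # -> hello, world
```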
# coding: utf-8
import random
from hpopt.datasets.uci.dexter import load_corpus
from ..sklearn import SklearnClassifier
def main():
X, y = load_corpus()
random.seed(0)
classifier = SklearnClassifier(popsize=100, select=20, iters=10, timeout=10, verbose=True)
classifier.fit(X, y)
if __name__ == "__main__":
main()
|
[
"random.seed",
"hpopt.datasets.uci.dexter.load_corpus"
] |
[((146, 159), 'hpopt.datasets.uci.dexter.load_corpus', 'load_corpus', ([], {}), '()\n', (157, 159), False, 'from hpopt.datasets.uci.dexter import load_corpus\n'), ((165, 179), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (176, 179), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 16:12:40 2021
@author: Administrator
"""
import alphalens
import pandas as pd
import numpy as np
# import warnings
# warnings.filterwarnings('ignore')
ticker_sector = {
"ACN" : 0, "ATVI" : 0, "ADBE" : 0, "AMD" : 0, "AKAM" : 0, "ADS" : 0, "GOOGL" : 0, "GOOG" : 0,
"APH" : 0, "ADI" : 0, "ANSS" : 0, "AAPL" : 0, "AMAT" : 0, "ADSK" : 0, "ADP" : 0, "AVGO" : 0,
"AMG" : 1, "AFL" : 1, "ALL" : 1, "AXP" : 1, "AIG" : 1, "AMP" : 1, "AON" : 1, "AJG" : 1, "AIZ" : 1, "BAC" : 1,
"BK" : 1, "BBT" : 1, "BRK.B" : 1, "BLK" : 1, "HRB" : 1, "BHF" : 1, "COF" : 1, "CBOE" : 1, "SCHW" : 1, "CB" : 1,
"ABT" : 2, "ABBV" : 2, "AET" : 2, "A" : 2, "ALXN" : 2, "ALGN" : 2, "AGN" : 2, "ABC" : 2, "AMGN" : 2, "ANTM" : 2,
"BCR" : 2, "BAX" : 2, "BDX" : 2, "BIIB" : 2, "BSX" : 2, "BMY" : 2, "CAH" : 2, "CELG" : 2, "CNC" : 2, "CERN" : 2,
"MMM" : 3, "AYI" : 3, "ALK" : 3, "ALLE" : 3, "AAL" : 3, "AME" : 3, "AOS" : 3, "ARNC" : 3, "BA" : 3, "CHRW" : 3,
"CAT" : 3, "CTAS" : 3, "CSX" : 3, "CMI" : 3, "DE" : 3, "DAL" : 3, "DOV" : 3, "ETN" : 3, "EMR" : 3, "EFX" : 3,
"AES" : 4, "LNT" : 4, "AEE" : 4, "AEP" : 4, "AWK" : 4, "CNP" : 4, "CMS" : 4, "ED" : 4, "D" : 4, "DTE" : 4,
"DUK" : 4, "EIX" : 4, "ETR" : 4, "ES" : 4, "EXC" : 4, "FE" : 4, "NEE" : 4, "NI" : 4, "NRG" : 4, "PCG" : 4,
"ARE" : 5, "AMT" : 5, "AIV" : 5, "AVB" : 5, "BXP" : 5, "CBG" : 5, "CCI" : 5, "DLR" : 5, "DRE" : 5,
"EQIX" : 5, "EQR" : 5, "ESS" : 5, "EXR" : 5, "FRT" : 5, "GGP" : 5, "HCP" : 5, "HST" : 5, "IRM" : 5, "KIM" : 5,
"APD" : 6, "ALB" : 6, "AVY" : 6, "BLL" : 6, "CF" : 6, "DWDP" : 6, "EMN" : 6, "ECL" : 6, "FMC" : 6, "FCX" : 6,
"IP" : 6, "IFF" : 6, "LYB" : 6, "MLM" : 6, "MON" : 6, "MOS" : 6, "NEM" : 6, "NUE" : 6, "PKG" : 6, "PPG" : 6,
"T" : 7, "CTL" : 7, "VZ" : 7,
"MO" : 8, "ADM" : 8, "BF.B" : 8, "CPB" : 8, "CHD" : 8, "CLX" : 8, "KO" : 8, "CL" : 8, "CAG" : 8,
"STZ" : 8, "COST" : 8, "COTY" : 8, "CVS" : 8, "DPS" : 8, "EL" : 8, "GIS" : 8, "HSY" : 8, "HRL" : 8,
"AAP" : 9, "AMZN" : 9, "APTV" : 9, "AZO" : 9, "BBY" : 9, "BWA" : 9, "KMX" : 9, "CCL" : 9,
"APC" : 10, "ANDV" : 10, "APA" : 10, "BHGE" : 10, "COG" : 10, "CHK" : 10, "CVX" : 10, "XEC" : 10, "CXO" : 10,
"COP" : 10, "DVN" : 10, "EOG" : 10, "EQT" : 10, "XOM" : 10, "HAL" : 10, "HP" : 10, "HES" : 10, "KMI" : 10
}
import pandas_datareader.data as web
pan = web.DataReader(list(ticker_sector.keys()), "yahoo", '2014-12-01', '2017-07-01')
# pan.to_pickle(r'xuan wang\pan.pkl')
# pan = pd.read_pickle(r'xuan wang\pan.pkl')
lookahead_bias_days = 5
pan = pan.transpose(2,1,0)
predictive_factor = pan.loc[:,:,'Open']
predictive_factor = predictive_factor.pct_change(lookahead_bias_days)
# introduce look-ahead bias and make the factor predictive
predictive_factor = predictive_factor.shift(-lookahead_bias_days)
predictive_factor = predictive_factor.stack()
predictive_factor.index = predictive_factor.index.set_names(['date', 'asset'])
pricing = pan.loc[:,:,'Open'].iloc[1:]
pricing.head()
sector_names = {
0 : "information_technology",
1 : "financials",
2 : "health_care",
3 : "industrials",
4 : "utilities",
5 : "real_estate",
6 : "materials",
7 : "telecommunication_services",
8 : "consumer_staples",
9 : "consumer_discretionary",
10 : "energy"
}
factor_data = alphalens.utils.get_clean_factor_and_forward_returns(predictive_factor,
pricing,
quantiles=5,
bins=None,
groupby=ticker_sector,
groupby_labels=sector_names)
|
[
"alphalens.utils.get_clean_factor_and_forward_returns"
] |
[((3350, 3515), 'alphalens.utils.get_clean_factor_and_forward_returns', 'alphalens.utils.get_clean_factor_and_forward_returns', (['predictive_factor', 'pricing'], {'quantiles': '(5)', 'bins': 'None', 'groupby': 'ticker_sector', 'groupby_labels': 'sector_names'}), '(predictive_factor,\n pricing, quantiles=5, bins=None, groupby=ticker_sector, groupby_labels=\n sector_names)\n', (3402, 3515), False, 'import alphalens\n')]
|
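The comment in the row above notes that the factor is made predictive by deliberately introducing look-ahead bias: `pct_change(n)` measures the change over the past n rows, and `shift(-n)` pulls the future change back to today's row. A tiny pandas illustration with made-up prices:

```python
# pct_change(n).shift(-n) turns a backward-looking change into the forward return.
import pandas as pd

prices = pd.Series([100.0, 102.0, 101.0, 105.0, 110.0, 108.0], name="open")
n = 2
factor = prices.pct_change(n).shift(-n)   # value at row i = return realised over rows i..i+n
print(pd.concat([prices, factor.rename("factor")], axis=1))
```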
# ============LICENSE_START====================================================
# org.onap.dcae
# =============================================================================
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
# =============================================================================
# Copyright (c) 2021 highstreet technologies GmbH. All rights reserved.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END======================================================
from setuptools import setup, find_packages
setup(
name="aoconversion",
version="1.0.6-SNAPSHOT",
packages=find_packages(exclude=["tests.*", "tests"]),
author="<NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>",
description="Service to create DCAE artifacts from acumos models",
url="",
install_requires=["docker>=4.0.0,<5.0.0", "jsonschema", "PyYAML", "requests"],
package_data={'aoconversion': ['index.html']},
entry_points={
"console_scripts": [
"acumos-adapter=aoconversion.adapter:adapter"
]
}
)
|
[
"setuptools.find_packages"
] |
[((1224, 1267), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests.*', 'tests']"}), "(exclude=['tests.*', 'tests'])\n", (1237, 1267), False, 'from setuptools import setup, find_packages\n')]
|
import os
import shutil
import tempfile
def ensure_dir(path):
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
def move_file(source, target):
ensure_dir(target)
shutil.move(source, target)
def copy_file(source, target):
if isinstance(source, (tuple, list)):
source = os.path.join(*source)
if isinstance(target, (tuple, list)):
target = os.path.join(*target)
ensure_dir(target)
shutil.copy(source, target)
def write_file(path, text):
with tempfile.NamedTemporaryFile("wt", delete=False, encoding="utf-8") as file:
file.write(text)
file.flush()
move_file(file.name, path)
|
[
"tempfile.NamedTemporaryFile",
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"shutil.move",
"os.path.join",
"shutil.copy"
] |
[((78, 99), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (93, 99), False, 'import os\n'), ((237, 264), 'shutil.move', 'shutil.move', (['source', 'target'], {}), '(source, target)\n', (248, 264), False, 'import shutil\n'), ((487, 514), 'shutil.copy', 'shutil.copy', (['source', 'target'], {}), '(source, target)\n', (498, 514), False, 'import shutil\n'), ((156, 176), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (167, 176), False, 'import os\n'), ((357, 378), 'os.path.join', 'os.path.join', (['*source'], {}), '(*source)\n', (369, 378), False, 'import os\n'), ((438, 459), 'os.path.join', 'os.path.join', (['*target'], {}), '(*target)\n', (450, 459), False, 'import os\n'), ((554, 619), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""wt"""'], {'delete': '(False)', 'encoding': '"""utf-8"""'}), "('wt', delete=False, encoding='utf-8')\n", (581, 619), False, 'import tempfile\n'), ((123, 146), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (137, 146), False, 'import os\n')]
|
import rapprentice, os, os.path as osp
from rapprentice.call_and_print import call_and_print
assert osp.basename(os.getcwd()) == "test"
call_and_print("python tps_unit_tests.py")
call_and_print("python ../scripts/download_sampledata.py ~/Data --use_rsync")
call_and_print("python ../scripts/generate_h5.py ~/Data/sampledata/overhand/overhand.yaml")
call_and_print("python test_registration_synthetic.py --plotting=0")
|
[
"os.getcwd",
"rapprentice.call_and_print.call_and_print"
] |
[((136, 178), 'rapprentice.call_and_print.call_and_print', 'call_and_print', (['"""python tps_unit_tests.py"""'], {}), "('python tps_unit_tests.py')\n", (150, 178), False, 'from rapprentice.call_and_print import call_and_print\n'), ((179, 256), 'rapprentice.call_and_print.call_and_print', 'call_and_print', (['"""python ../scripts/download_sampledata.py ~/Data --use_rsync"""'], {}), "('python ../scripts/download_sampledata.py ~/Data --use_rsync')\n", (193, 256), False, 'from rapprentice.call_and_print import call_and_print\n'), ((257, 358), 'rapprentice.call_and_print.call_and_print', 'call_and_print', (['"""python ../scripts/generate_h5.py ~/Data/sampledata/overhand/overhand.yaml"""'], {}), "(\n 'python ../scripts/generate_h5.py ~/Data/sampledata/overhand/overhand.yaml'\n )\n", (271, 358), False, 'from rapprentice.call_and_print import call_and_print\n'), ((349, 417), 'rapprentice.call_and_print.call_and_print', 'call_and_print', (['"""python test_registration_synthetic.py --plotting=0"""'], {}), "('python test_registration_synthetic.py --plotting=0')\n", (363, 417), False, 'from rapprentice.call_and_print import call_and_print\n'), ((113, 124), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (122, 124), False, 'import rapprentice, os, os.path as osp\n')]
|
from collections import deque
def main():
N, Q = map(int, input().split())
path_dat = [list(map(int, input().split())) for _ in range(N - 1)]
queries = [list(map(int, input().split())) for _ in range(Q)]
paths = [[] for _ in range(N)]
for a, b in path_dat:
a -= 1
b -= 1
paths[a].append(b)
paths[b].append(a)
dist = [-1] * N
dist[0] = 0
queue = deque([0])
while queue:
now = queue.popleft()
for nxt in paths[now]:
if dist[nxt] != -1:
continue
dist[nxt] = dist[now] + 1
queue.append(nxt)
for c, d in queries:
c -= 1
d -= 1
tmp = dist[c] + dist[d]
if tmp % 2 == 0:
print('Town')
else:
print('Road')
main()
|
[
"collections.deque"
] |
[((413, 423), 'collections.deque', 'deque', (['[0]'], {}), '([0])\n', (418, 423), False, 'from collections import deque\n')]
|
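Why the parity check in the row above suffices (a short sketch of the reasoning the code leaves implicit): in a tree, dist(c, d) = dist(c) + dist(d) − 2·dist(lca(c, d)), with all distances measured from the BFS root, so dist(c, d) has the same parity as dist(c) + dist(d). An even c–d distance means the midpoint of the path is a vertex ("Town"); an odd one means it falls in the middle of an edge ("Road"). For example, dist(c) = 3 and dist(d) = 5 give an even sum, hence "Town".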
#!/usr/bin/python
# Copyright 2002 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import re
def match_re(actual, expected):
return re.match(expected, actual, re.DOTALL) != None
t = BoostBuild.Tester(match=match_re)
t.set_tree('testing-primitives')
# We expect t5 and t7's output to be dumped to stdout.
t.run_build_system(stdout=r'''.*failing t5.*failing t7''')
t.expect_addition('t2.txt')
t.expect_addition('t3.txt')
t.expect_addition('t5.out')
t.expect_addition('t6.out')
t.expect_addition('t6.txt')
t.expect_addition('t7.out')
t.expect_addition('t7.txt')
t.expect_addition('t8.out')
t.expect_nothing_more()
t.cleanup()
|
[
"re.match",
"BoostBuild.Tester"
] |
[((313, 346), 'BoostBuild.Tester', 'BoostBuild.Tester', ([], {'match': 'match_re'}), '(match=match_re)\n', (330, 346), False, 'import BoostBuild\n'), ((262, 299), 're.match', 're.match', (['expected', 'actual', 're.DOTALL'], {}), '(expected, actual, re.DOTALL)\n', (270, 299), False, 'import re\n')]
|
import pytest
from dz4.calculator.calculator import Calculator
@pytest.fixture()
def calculator() -> Calculator:
return Calculator()
|
[
"dz4.calculator.calculator.Calculator",
"pytest.fixture"
] |
[((67, 83), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (81, 83), False, 'import pytest\n'), ((127, 139), 'dz4.calculator.calculator.Calculator', 'Calculator', ([], {}), '()\n', (137, 139), False, 'from dz4.calculator.calculator import Calculator\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: actions.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='actions.proto',
package='Proto.JSON',
syntax='proto3',
serialized_pb=b'\n\ractions.proto\x12\nProto.JSON\"\xaf\x01\n\raction_object\x12\x0e\n\x06\x61\x63tion\x18\x01 \x01(\t\x12\x45\n\nparameters\x18\x02 \x03(\x0b\x32\x31.Proto.JSON.action_object.action_parameter_object\x1aG\n\x17\x61\x63tion_parameter_object\x12\x14\n\x0cparameterKey\x18\x01 \x01(\t\x12\x16\n\x0eparameterValue\x18\x02 \x01(\tB/\n\x1enet.ktc.miles.model.proto.JSONB\x0b\x41\x63tionsJSONH\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ACTION_OBJECT_ACTION_PARAMETER_OBJECT = _descriptor.Descriptor(
name='action_parameter_object',
full_name='Proto.JSON.action_object.action_parameter_object',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameterKey', full_name='Proto.JSON.action_object.action_parameter_object.parameterKey', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameterValue', full_name='Proto.JSON.action_object.action_parameter_object.parameterValue', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=134,
serialized_end=205,
)
_ACTION_OBJECT = _descriptor.Descriptor(
name='action_object',
full_name='Proto.JSON.action_object',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='action', full_name='Proto.JSON.action_object.action', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='Proto.JSON.action_object.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ACTION_OBJECT_ACTION_PARAMETER_OBJECT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=205,
)
_ACTION_OBJECT_ACTION_PARAMETER_OBJECT.containing_type = _ACTION_OBJECT
_ACTION_OBJECT.fields_by_name['parameters'].message_type = _ACTION_OBJECT_ACTION_PARAMETER_OBJECT
DESCRIPTOR.message_types_by_name['action_object'] = _ACTION_OBJECT
action_object = _reflection.GeneratedProtocolMessageType('action_object', (_message.Message,), dict(
action_parameter_object = _reflection.GeneratedProtocolMessageType('action_parameter_object', (_message.Message,), dict(
DESCRIPTOR = _ACTION_OBJECT_ACTION_PARAMETER_OBJECT,
__module__ = 'actions_pb2'
# @@protoc_insertion_point(class_scope:Proto.JSON.action_object.action_parameter_object)
))
,
DESCRIPTOR = _ACTION_OBJECT,
__module__ = 'actions_pb2'
# @@protoc_insertion_point(class_scope:Proto.JSON.action_object)
))
_sym_db.RegisterMessage(action_object)
_sym_db.RegisterMessage(action_object.action_parameter_object)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\036net.ktc.miles.model.proto.JSONB\013ActionsJSONH\001')
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor_pb2.FileOptions",
"google.protobuf.descriptor.FileDescriptor"
] |
[((394, 420), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (418, 420), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((438, 931), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""actions.proto"""', 'package': '"""Proto.JSON"""', 'syntax': '"""proto3"""', 'serialized_pb': 'b\'\\n\\ractions.proto\\x12\\nProto.JSON"\\xaf\\x01\\n\\raction_object\\x12\\x0e\\n\\x06action\\x18\\x01 \\x01(\\t\\x12E\\n\\nparameters\\x18\\x02 \\x03(\\x0b21.Proto.JSON.action_object.action_parameter_object\\x1aG\\n\\x17action_parameter_object\\x12\\x14\\n\\x0cparameterKey\\x18\\x01 \\x01(\\t\\x12\\x16\\n\\x0eparameterValue\\x18\\x02 \\x01(\\tB/\\n\\x1enet.ktc.miles.model.proto.JSONB\\x0bActionsJSONH\\x01b\\x06proto3\''}), '(name=\'actions.proto\', package=\'Proto.JSON\',\n syntax=\'proto3\', serialized_pb=\n b\'\\n\\ractions.proto\\x12\\nProto.JSON"\\xaf\\x01\\n\\raction_object\\x12\\x0e\\n\\x06action\\x18\\x01 \\x01(\\t\\x12E\\n\\nparameters\\x18\\x02 \\x03(\\x0b21.Proto.JSON.action_object.action_parameter_object\\x1aG\\n\\x17action_parameter_object\\x12\\x14\\n\\x0cparameterKey\\x18\\x01 \\x01(\\t\\x12\\x16\\n\\x0eparameterValue\\x18\\x02 \\x01(\\tB/\\n\\x1enet.ktc.miles.model.proto.JSONB\\x0bActionsJSONH\\x01b\\x06proto3\'\n )\n', (464, 931), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4317, 4345), 'google.protobuf.descriptor_pb2.FileOptions', 'descriptor_pb2.FileOptions', ([], {}), '()\n', (4343, 4345), False, 'from google.protobuf import descriptor_pb2\n'), ((2759, 3078), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""parameters"""', 'full_name': '"""Proto.JSON.action_object.parameters"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='parameters', full_name=\n 'Proto.JSON.action_object.parameters', index=1, number=2, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None)\n", (2786, 3078), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
from typing import Optional
from psychopy import core, visual
from katona.logic import eventsmeasures
from katona.logic.datasaver import DataSaver
from katona.logic.movement import StickMover
from katona.logic import eventhandler
from katona.logic import essential
from katona.visual import optional
from katona.visual.grid import VisualGrid
class Katona:
def __init__(self,
window: visual.Window,
stick_length: float,
stick_width: float,
field_size: int,
grid_color: str,
data_fp: str,
experiment_info: dict,
special_event_finder: str,
event_threshold: int = 30,
solution=None,
time_limit: int = 15,
background_color="white",
screen_cover_color="white",
time_to_cover_grid=None,
feedback=None,
):
self._window = window
self._clock = core.Clock()
self._feedback_type = feedback
self._stop_questioning_stick_color = False
self._time_limit = time_limit * 60
self._moves_made = 0
self._correct_answers = 0
self.solved: Optional[bool] = None
self.grid_color = grid_color
self.grid = VisualGrid(window=window,
stick_length=stick_length,
stick_width=stick_width,
field_size=field_size,
grid_color=self.grid_color,
)
self.stick_mover = StickMover(window=window)
if special_event_finder == "ObjectiveImpasseFinder":
self._special_event_finder = eventsmeasures.ObjectiveImpasseFinder(
minimum_data_to_identify=event_threshold)
self.data_saver = DataSaver(save_folder=data_fp,
experiment_info=experiment_info,
special_event_finder=self._special_event_finder,
)
button_x_pos = (self.grid.outer_border + window.size[0] / 2) / 2
self._reset_button = optional.Button(win=window,
event_name="default place",
label_size=40,
label_text="Обновить",
pos=(button_x_pos, 0),
button_enabled=True,
button_color=background_color,
)
self._impasse_button = optional.Button(win=window,
event_name="impasse",
label_size=40,
label_text="Я застрял",
pos=(-button_x_pos, 0),
button_enabled=True,
button_color=background_color,
)
self._scree_cover = optional.ScreenCover(window=window,
start_position=(0, self.grid.outer_border),
size=self.grid.outer_border,
cover_color=screen_cover_color,
grow_rate=0.5,
time_to_cover=time_to_cover_grid)
solution_absolute_idx = {}
for solution_name in solution:
solution_absolute_idx[solution_name] = {}
for solution_info_name, solution_info in solution[solution_name].items():
solution_absolute_idx[solution_name][solution_info_name] = \
tuple([self.grid.extract_grid_element_by_grid_idx(grid_idx).grid_idx
for grid_idx in solution_info])
self._answer_checker = essential.AnswerChecker(solution_absolute_idx)
if feedback == "sound":
sound_path = f"sounds/{experiment_info['feedback.type']}.wav"
if experiment_info['feedback.type'] == "positive":
self._event_feedback = eventhandler.PositiveSoundFeedback(sound_path)
else:
self._event_feedback = eventhandler.NegativeSoundFeedback(sound_path)
elif feedback == "image":
image_path = f"images/{experiment_info['feedback.type']}.jpg"
if experiment_info['feedback.type'] == "positive":
self._event_feedback = eventhandler.PositiveImageFeedback(window, image_path)
else:
self._event_feedback = eventhandler.NegativeImageFeedback(window, image_path)
elif feedback == "phrases":
import csv
phrases_fp = "text/phrases.csv"
column = experiment_info['feedback.type']
phrases = []
with open(file=phrases_fp, mode="r", encoding="utf-8") as csv_file:
phrases_file = csv.DictReader(f=csv_file)
for row in phrases_file:
phrases.append(row[column])
max_width = window.size[0] / 2 - self.grid.outer_border
self._event_feedback = eventhandler.TextTimeHandler(window,
phrases_list=phrases,
phrase_time_showed=10,
time_between_phrases=60,
position=(-button_x_pos, 0),
width=max_width * 0.8)
self._impasse_button = optional.FakeButton()
elif feedback is None:
self._event_feedback = eventhandler.ZeroHandler()
self._color_group = experiment_info['feedback.type']
self._chosen_colors = {}
def create_movable_sticks(self,
grid_indexes,
movable_stick_color,
color_positions: str = "all"):
        # save the stick color information if this is the condition with colored sticks
if isinstance(self._event_feedback, eventhandler.ZeroHandler):
self._chosen_colors[movable_stick_color[1]] = f"neutral.{movable_stick_color[1]}"
if self._color_group == "important likable":
self._chosen_colors[movable_stick_color[0]] = f"like.{movable_stick_color[0]}"
self._chosen_colors[movable_stick_color[2]] = f"dislike.{movable_stick_color[2]}"
elif self._color_group == "unimportant likable":
self._chosen_colors[movable_stick_color[2]] = f"like.{movable_stick_color[2]}"
self._chosen_colors[movable_stick_color[0]] = f"dislike.{movable_stick_color[0]}"
elif self._color_group == "control":
self._chosen_colors[movable_stick_color[0]] = f"like.{movable_stick_color[0]}"
self._chosen_colors[movable_stick_color[2]] = f"dislike.{movable_stick_color[2]}"
movable_stick_color = movable_stick_color[1]
self.grid.create_movable_sticks(grid_indexes=grid_indexes,
movable_stick_color=movable_stick_color,
color_positions=color_positions)
def run(self):
if self._clock.getTime() >= self._time_limit:
self.solved = False
self.data_saver.save_failure_to_solve(9999)
return
if self._event_feedback.is_in_progress():
return
if self._moves_made != 3:
self.stick_mover.check_and_execute_moves(movable_sticks=self.grid.movable_elements,
grid_elements=self.grid.grid_elements)
if not self._stop_questioning_stick_color and \
self.stick_mover.chosen_stick is not None:
stick_color_hex = getattr(self.stick_mover.chosen_stick.visual_element, "fillColor")
stick_color_like = self._chosen_colors[stick_color_hex]
self.data_saver.get_stick_color(stick_color_like)
self._stop_questioning_stick_color = True
mouse_last_click = self.stick_mover.last_click
solving = self._answer_checker.is_approach_solution(mouse_last_click)
if self._feedback_type == "phrases":
self._event_feedback.on_event(solving)
if not self.stick_mover.move_made:
self.data_saver.get_click(mouse_last_click)
else:
self._correct_answers += solving
self._event_feedback.on_event(solving)
self.data_saver.get_event_feedback(self._event_feedback.is_new_event())
self.data_saver.get_click(mouse_last_click)
self._moves_made += 1
if self._correct_answers == 3:
self.solved = True
self._stop_questioning_stick_color = False
if self.stick_mover.chosen_stick is None and self._reset_button.button_pressed():
self.data_saver.get_click(self._reset_button.last_click)
self.stick_mover.release_stick()
self.return_to_default()
self._answer_checker.reset()
self._scree_cover.resize()
if self._impasse_button.button_pressed():
self.data_saver.get_click(self._impasse_button.last_click)
def get_moves_made(self):
return self._moves_made
def return_to_default(self):
self._moves_made = 0
self._correct_answers = 0
self.grid.return_to_default_positions()
def start_time(self):
self.stick_mover.reset_time()
self._reset_button.reset_time()
self._impasse_button.reset_time()
self._clock.reset()
self._event_feedback.reset_time()
def draw(self):
self._reset_button.draw()
self._impasse_button.draw()
self.grid.draw()
if self._moves_made == 3 and not self.solved and not self._event_feedback.is_in_progress():
self._scree_cover.draw()
class TrainingOnGrid:
def __init__(self,
window: visual.Window,
stick_length: float,
stick_width: float,
field_size: int,
grid_color: str,
movable_stick_color: str,
):
self.training_finished = False
self.grid_color = grid_color
self.movable_stick_color = movable_stick_color
self._window = window
target_grid_positions = ((7,), (1, 15), (),)
self._target_grid_positions = iter(target_grid_positions)
self._target_grid_marks = []
self._current_target_grid_positions = None
self.grid = VisualGrid(window=self._window,
stick_length=stick_length,
stick_width=stick_width,
field_size=field_size,
grid_color=self.grid_color,
)
self._training_sticks_position = iter((((0, -1),), ((0, 0),), ((0, 1), (0, 2))))
self._training_sticks_marks = []
conditions = (self.first_condition, self.second_condition, lambda _, __: None)
self._conditions = iter(conditions)
self._current_condition = None
training_messages = ("Нажмите на палочку c точкой и поставьте её в ячейку с крестиком",
"Палочки можно вращать, для этого возьмите её и покрутите колесо мыши.\n"
"Теперь расположите палочки с точками в ячейках с крестиками.",
"Кроме уже сделаного на экране могут быть кнопки, на них нужно просто нажимать.\n"
"Нажмите на кнопку на экране.")
self._training_messages = iter(training_messages)
self.stick_mover = StickMover(window=self._window)
text_y_pos = (self.grid.outer_border + window.size[1] / 2) / 2
self._training_instruction = visual.TextStim(win=self._window,
text="",
pos=(0, text_y_pos),
color="black",
height=39,
wrapWidth=self._window.size[0],
)
button_y_pos = -text_y_pos
self._training_button = optional.Button(win=self._window,
event_name="training",
label_size=40,
label_text="Нажми меня",
pos=(0, button_y_pos),
button_enabled=False)
self._last_animation = ((row, col) for col in range(-2, 3)
for row in range(-5, 6, 2))
self._last_animation_created = False
self._clock = core.Clock()
self._last_animation_update = 0
self._next_training_stage()
@staticmethod
def first_condition(movable, grid):
movable_x, movable_y = movable[0].visual_element.pos
grid_x, grid_y = grid[7].visual_element.pos
return movable_x == grid_x and movable_y == grid_y
@staticmethod
def second_condition(movable, grid):
wanted_positions = (tuple(grid[1].visual_element.pos), tuple(grid[15].visual_element.pos))
first_stick_in_place = tuple(movable[0].visual_element.pos) in wanted_positions
second_stick_in_place = tuple(movable[1].visual_element.pos) in wanted_positions
return first_stick_in_place and second_stick_in_place
def _next_training_stage(self):
self._target_grid_marks = []
add_sticks = next(self._training_sticks_position)
self.grid.create_movable_sticks(grid_indexes=add_sticks,
movable_stick_color=self.movable_stick_color)
for grid_element in self.grid.movable_elements:
stick_pos = grid_element.visual_element.pos
circle = visual.Circle(win=self._window,
fillColor="yellow",
pos=stick_pos,
size=7)
self._training_sticks_marks.append(circle)
if self._current_target_grid_positions is not None:
self.grid.set_grid_color(self.grid_color)
# TODO: remove old way
# for grid_element_idx in self._current_target_grid_positions:
# self.grid.grid_elements[grid_element_idx].visual_element.color = self.grid_color
self._current_target_grid_positions = next(self._target_grid_positions)
for grid_element_idx in self._current_target_grid_positions:
# self.grid.grid_elements[grid_element_idx].visual_element.color = "red"
target_pos = self.grid.grid_elements[grid_element_idx].visual_element.pos
cross = visual.ShapeStim(win=self._window,
vertices="cross",
fillColor="yellow",
pos=target_pos,
size=12,
ori=45)
self._target_grid_marks.append(cross)
self._training_instruction.text = next(self._training_messages)
self._current_condition = next(self._conditions)
if len(self.grid.movable_elements) == 4:
self._training_button.buttonEnabled = True
self.grid.return_to_default_positions()
def _is_training_stage_completed(self):
if self._training_button.button_pressed():
self._training_button.buttonEnabled = False
self.training_finished = True
self.grid.movable_elements = []
self._training_sticks_marks = []
self._training_instruction.text = "Тренировка закончена. Чтобы продолжить нажмите пробел"
return self._current_condition(self.grid.movable_elements, self.grid.grid_elements)
def run(self):
self.stick_mover.check_and_execute_moves(movable_sticks=self.grid.movable_elements,
grid_elements=self.grid.grid_elements)
if self._is_training_stage_completed():
self._next_training_stage()
def draw(self):
if not self._last_animation_created and self.training_finished:
try:
if self._clock.getTime() - self._last_animation_update >= 0.1:
self._last_animation_update = self._clock.getTime()
idx_of_grid_for_stick = (next(self._last_animation),)
self.grid.create_movable_sticks(grid_indexes=idx_of_grid_for_stick,
movable_stick_color="brown")
except StopIteration:
self._last_animation_created = True
if self._training_button.buttonEnabled:
self._training_button.draw()
# TODO: remove old way
# for grid_element in self.grid.grid_elements:
# grid_element.visual_element.draw()
self.grid.draw()
for target_mark in self._target_grid_marks:
target_mark.draw()
positions = []
for movable_element in self.grid.movable_elements:
movable_element.visual_element.draw()
pos = movable_element.visual_element.pos
positions.append(pos)
for pos, stick_mark in zip(positions, self._training_sticks_marks):
stick_mark.pos = pos
stick_mark.draw()
self._training_instruction.draw()
|
[
"katona.logic.eventhandler.PositiveSoundFeedback",
"katona.visual.optional.FakeButton",
"katona.logic.eventhandler.NegativeImageFeedback",
"katona.visual.grid.VisualGrid",
"katona.logic.essential.AnswerChecker",
"katona.logic.movement.StickMover",
"katona.visual.optional.ScreenCover",
"katona.logic.eventhandler.PositiveImageFeedback",
"csv.DictReader",
"katona.visual.optional.Button",
"katona.logic.eventsmeasures.ObjectiveImpasseFinder",
"psychopy.visual.ShapeStim",
"katona.logic.datasaver.DataSaver",
"psychopy.core.Clock",
"psychopy.visual.Circle",
"katona.logic.eventhandler.ZeroHandler",
"psychopy.visual.TextStim",
"katona.logic.eventhandler.TextTimeHandler",
"katona.logic.eventhandler.NegativeSoundFeedback"
] |
[((1030, 1042), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (1040, 1042), False, 'from psychopy import core, visual\n'), ((1339, 1472), 'katona.visual.grid.VisualGrid', 'VisualGrid', ([], {'window': 'window', 'stick_length': 'stick_length', 'stick_width': 'stick_width', 'field_size': 'field_size', 'grid_color': 'self.grid_color'}), '(window=window, stick_length=stick_length, stick_width=\n stick_width, field_size=field_size, grid_color=self.grid_color)\n', (1349, 1472), False, 'from katona.visual.grid import VisualGrid\n'), ((1653, 1678), 'katona.logic.movement.StickMover', 'StickMover', ([], {'window': 'window'}), '(window=window)\n', (1663, 1678), False, 'from katona.logic.movement import StickMover\n'), ((1906, 2022), 'katona.logic.datasaver.DataSaver', 'DataSaver', ([], {'save_folder': 'data_fp', 'experiment_info': 'experiment_info', 'special_event_finder': 'self._special_event_finder'}), '(save_folder=data_fp, experiment_info=experiment_info,\n special_event_finder=self._special_event_finder)\n', (1915, 2022), False, 'from katona.logic.datasaver import DataSaver\n'), ((2232, 2408), 'katona.visual.optional.Button', 'optional.Button', ([], {'win': 'window', 'event_name': '"""default place"""', 'label_size': '(40)', 'label_text': '"""Обновить"""', 'pos': '(button_x_pos, 0)', 'button_enabled': '(True)', 'button_color': 'background_color'}), "(win=window, event_name='default place', label_size=40,\n label_text='Обновить', pos=(button_x_pos, 0), button_enabled=True,\n button_color=background_color)\n", (2247, 2408), False, 'from katona.visual import optional\n'), ((2750, 2924), 'katona.visual.optional.Button', 'optional.Button', ([], {'win': 'window', 'event_name': '"""impasse"""', 'label_size': '(40)', 'label_text': '"""Я застрял"""', 'pos': '(-button_x_pos, 0)', 'button_enabled': '(True)', 'button_color': 'background_color'}), "(win=window, event_name='impasse', label_size=40, label_text\n ='Я застрял', pos=(-button_x_pos, 0), button_enabled=True, button_color\n =background_color)\n", (2765, 2924), False, 'from katona.visual import optional\n'), ((3275, 3474), 'katona.visual.optional.ScreenCover', 'optional.ScreenCover', ([], {'window': 'window', 'start_position': '(0, self.grid.outer_border)', 'size': 'self.grid.outer_border', 'cover_color': 'screen_cover_color', 'grow_rate': '(0.5)', 'time_to_cover': 'time_to_cover_grid'}), '(window=window, start_position=(0, self.grid.\n outer_border), size=self.grid.outer_border, cover_color=\n screen_cover_color, grow_rate=0.5, time_to_cover=time_to_cover_grid)\n', (3295, 3474), False, 'from katona.visual import optional\n'), ((4182, 4228), 'katona.logic.essential.AnswerChecker', 'essential.AnswerChecker', (['solution_absolute_idx'], {}), '(solution_absolute_idx)\n', (4205, 4228), False, 'from katona.logic import essential\n'), ((11135, 11274), 'katona.visual.grid.VisualGrid', 'VisualGrid', ([], {'window': 'self._window', 'stick_length': 'stick_length', 'stick_width': 'stick_width', 'field_size': 'field_size', 'grid_color': 'self.grid_color'}), '(window=self._window, stick_length=stick_length, stick_width=\n stick_width, field_size=field_size, grid_color=self.grid_color)\n', (11145, 11274), False, 'from katona.visual.grid import VisualGrid\n'), ((12279, 12310), 'katona.logic.movement.StickMover', 'StickMover', ([], {'window': 'self._window'}), '(window=self._window)\n', (12289, 12310), False, 'from katona.logic.movement import StickMover\n'), ((12419, 12545), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'self._window', 
'text': '""""""', 'pos': '(0, text_y_pos)', 'color': '"""black"""', 'height': '(39)', 'wrapWidth': 'self._window.size[0]'}), "(win=self._window, text='', pos=(0, text_y_pos), color=\n 'black', height=39, wrapWidth=self._window.size[0])\n", (12434, 12545), False, 'from psychopy import core, visual\n'), ((12929, 13074), 'katona.visual.optional.Button', 'optional.Button', ([], {'win': 'self._window', 'event_name': '"""training"""', 'label_size': '(40)', 'label_text': '"""Нажми меня"""', 'pos': '(0, button_y_pos)', 'button_enabled': '(False)'}), "(win=self._window, event_name='training', label_size=40,\n label_text='Нажми меня', pos=(0, button_y_pos), button_enabled=False)\n", (12944, 13074), False, 'from katona.visual import optional\n'), ((13507, 13519), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (13517, 13519), False, 'from psychopy import core, visual\n'), ((1782, 1861), 'katona.logic.eventsmeasures.ObjectiveImpasseFinder', 'eventsmeasures.ObjectiveImpasseFinder', ([], {'minimum_data_to_identify': 'event_threshold'}), '(minimum_data_to_identify=event_threshold)\n', (1819, 1861), False, 'from katona.logic import eventsmeasures\n'), ((14645, 14719), 'psychopy.visual.Circle', 'visual.Circle', ([], {'win': 'self._window', 'fillColor': '"""yellow"""', 'pos': 'stick_pos', 'size': '(7)'}), "(win=self._window, fillColor='yellow', pos=stick_pos, size=7)\n", (14658, 14719), False, 'from psychopy import core, visual\n'), ((15545, 15654), 'psychopy.visual.ShapeStim', 'visual.ShapeStim', ([], {'win': 'self._window', 'vertices': '"""cross"""', 'fillColor': '"""yellow"""', 'pos': 'target_pos', 'size': '(12)', 'ori': '(45)'}), "(win=self._window, vertices='cross', fillColor='yellow',\n pos=target_pos, size=12, ori=45)\n", (15561, 15654), False, 'from psychopy import core, visual\n'), ((4439, 4485), 'katona.logic.eventhandler.PositiveSoundFeedback', 'eventhandler.PositiveSoundFeedback', (['sound_path'], {}), '(sound_path)\n', (4473, 4485), False, 'from katona.logic import eventhandler\n'), ((4543, 4589), 'katona.logic.eventhandler.NegativeSoundFeedback', 'eventhandler.NegativeSoundFeedback', (['sound_path'], {}), '(sound_path)\n', (4577, 4589), False, 'from katona.logic import eventhandler\n'), ((4801, 4855), 'katona.logic.eventhandler.PositiveImageFeedback', 'eventhandler.PositiveImageFeedback', (['window', 'image_path'], {}), '(window, image_path)\n', (4835, 4855), False, 'from katona.logic import eventhandler\n'), ((4913, 4967), 'katona.logic.eventhandler.NegativeImageFeedback', 'eventhandler.NegativeImageFeedback', (['window', 'image_path'], {}), '(window, image_path)\n', (4947, 4967), False, 'from katona.logic import eventhandler\n'), ((5482, 5648), 'katona.logic.eventhandler.TextTimeHandler', 'eventhandler.TextTimeHandler', (['window'], {'phrases_list': 'phrases', 'phrase_time_showed': '(10)', 'time_between_phrases': '(60)', 'position': '(-button_x_pos, 0)', 'width': '(max_width * 0.8)'}), '(window, phrases_list=phrases,\n phrase_time_showed=10, time_between_phrases=60, position=(-button_x_pos,\n 0), width=max_width * 0.8)\n', (5510, 5648), False, 'from katona.logic import eventhandler\n'), ((5997, 6018), 'katona.visual.optional.FakeButton', 'optional.FakeButton', ([], {}), '()\n', (6016, 6018), False, 'from katona.visual import optional\n'), ((5262, 5288), 'csv.DictReader', 'csv.DictReader', ([], {'f': 'csv_file'}), '(f=csv_file)\n', (5276, 5288), False, 'import csv\n'), ((6085, 6111), 'katona.logic.eventhandler.ZeroHandler', 'eventhandler.ZeroHandler', ([], {}), '()\n', (6109, 6111), 
False, 'from katona.logic import eventhandler\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 8 22:09:19 2017
@author: LinZhang
"""
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
def reformat(dataset, labels):
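    # reshape flat image rows into (num_images, image_size, image_size, num_channels)
    # and convert the integer labels into one-hot float32 vectors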
dataset = dataset.reshape(
(-1, image_size, image_size, num_channels)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
return dataset, labels
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
# def initial variables:
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# def basic operation in cnn:
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
batch_size = 16
#patch_size = 5 # not really used, finetune your network for fun!
#depth = 16 # not really used, finetune your network for fun!
num_hidden = 1024
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# a small network with two convolutional layers, followed by one fully connected layer
graph = tf.Graph()
with graph.as_default():
# Input data.
tf_train_dataset = tf.placeholder(
tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
keep_prob = tf.placeholder(tf.float32)
# Variables.
layer1_weights = weight_variable([3, 3, num_channels, 8]) # conv kernel
layer1_biases = bias_variable([8])
layer2_weights = weight_variable([3, 3, 8, 16]) # conv kernel
layer2_biases = bias_variable([16])
layer3_weights = weight_variable([image_size // 4 * image_size // 4 * 16, num_hidden])
layer3_biases = bias_variable([num_hidden])
layer4_weights = weight_variable([num_hidden, num_labels])
layer4_biases = bias_variable([num_labels])
# Model.
def model(data,use_dropout = False):
# convolution layer 1
conv1 = conv2d(data, layer1_weights)
hidden1 = tf.nn.relu(conv1 + layer1_biases)
hidden1_pool = max_pool_2x2(hidden1)
# convolution layer 2
conv2 = conv2d(hidden1_pool, layer2_weights)
hidden2 = tf.nn.relu(conv2 + layer2_biases)
hidden2_pool = max_pool_2x2(hidden2)
# full connection layer
shape = hidden2_pool.get_shape().as_list()
reshape = tf.reshape(hidden2_pool, [shape[0], shape[1] * shape[2] * shape[3]])
hidden3 = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
# dropout
if (use_dropout):
return tf.matmul(tf.nn.dropout(hidden3,keep_prob), layer4_weights) + layer4_biases
else:
return tf.matmul(hidden3, layer4_weights) + layer4_biases
# Training computation.
logits = model(tf_train_dataset,use_dropout = True) # only training uses dropout
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# learning rate decay
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(0.05,
global_step, 100, 0.95, staircase=True)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob:0.5}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 100 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
|
[
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.nn.conv2d",
"tensorflow.truncated_normal",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"six.moves.range",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.nn.max_pool",
"tensorflow.Graph",
"six.moves.cPickle.load",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.exponential_decay",
"tensorflow.nn.dropout"
] |
[((2536, 2546), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2544, 2546), True, 'import tensorflow as tf\n'), ((844, 882), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (863, 882), True, 'import tensorflow as tf\n'), ((894, 914), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (905, 914), True, 'import tensorflow as tf\n'), ((957, 986), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (968, 986), True, 'import tensorflow as tf\n'), ((998, 1018), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1009, 1018), True, 'import tensorflow as tf\n'), ((1079, 1135), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (1091, 1135), True, 'import tensorflow as tf\n'), ((1170, 1245), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (1184, 1245), True, 'import tensorflow as tf\n'), ((1553, 1567), 'six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1564, 1567), True, 'from six.moves import cPickle as pickle\n'), ((2614, 2702), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, image_size, image_size, num_channels)'}), '(tf.float32, shape=(batch_size, image_size, image_size,\n num_channels))\n', (2628, 2702), True, 'import tensorflow as tf\n'), ((2730, 2788), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_labels)'}), '(tf.float32, shape=(batch_size, num_labels))\n', (2744, 2788), True, 'import tensorflow as tf\n'), ((2812, 2838), 'tensorflow.constant', 'tf.constant', (['valid_dataset'], {}), '(valid_dataset)\n', (2823, 2838), True, 'import tensorflow as tf\n'), ((2861, 2886), 'tensorflow.constant', 'tf.constant', (['test_dataset'], {}), '(test_dataset)\n', (2872, 2886), True, 'import tensorflow as tf\n'), ((2903, 2929), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2917, 2929), True, 'import tensorflow as tf\n'), ((4616, 4647), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (4627, 4647), True, 'import tensorflow as tf\n'), ((4700, 4772), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['(0.05)', 'global_step', '(100)', '(0.95)'], {'staircase': '(True)'}), '(0.05, global_step, 100, 0.95, staircase=True)\n', (4726, 4772), True, 'import tensorflow as tf\n'), ((5005, 5026), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (5018, 5026), True, 'import tensorflow as tf\n'), ((5177, 5200), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5187, 5200), True, 'import tensorflow as tf\n'), ((5294, 5310), 'six.moves.range', 'range', (['num_steps'], {}), '(num_steps)\n', (5299, 5310), False, 'from six.moves import range\n'), ((3591, 3624), 'tensorflow.nn.relu', 'tf.nn.relu', (['(conv1 + layer1_biases)'], {}), '(conv1 + layer1_biases)\n', (3601, 3624), True, 'import tensorflow as tf\n'), ((3780, 3813), 'tensorflow.nn.relu', 'tf.nn.relu', (['(conv2 + layer2_biases)'], {}), '(conv2 + layer2_biases)\n', (3790, 3813), True, 'import tensorflow as tf\n'), ((3969, 4037), 'tensorflow.reshape', 'tf.reshape', (['hidden2_pool', '[shape[0], shape[1] * shape[2] * 
shape[3]]'], {}), '(hidden2_pool, [shape[0], shape[1] * shape[2] * shape[3]])\n', (3979, 4037), True, 'import tensorflow as tf\n'), ((4492, 4570), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'tf_train_labels', 'logits': 'logits'}), '(labels=tf_train_labels, logits=logits)\n', (4531, 4570), True, 'import tensorflow as tf\n'), ((4854, 4902), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4887, 4902), True, 'import tensorflow as tf\n'), ((5217, 5246), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5244, 5246), True, 'import tensorflow as tf\n'), ((537, 558), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (546, 558), True, 'import numpy as np\n'), ((4067, 4101), 'tensorflow.matmul', 'tf.matmul', (['reshape', 'layer3_weights'], {}), '(reshape, layer3_weights)\n', (4076, 4101), True, 'import tensorflow as tf\n'), ((4291, 4325), 'tensorflow.matmul', 'tf.matmul', (['hidden3', 'layer4_weights'], {}), '(hidden3, layer4_weights)\n', (4300, 4325), True, 'import tensorflow as tf\n'), ((689, 714), 'numpy.argmax', 'np.argmax', (['predictions', '(1)'], {}), '(predictions, 1)\n', (698, 714), True, 'import numpy as np\n'), ((718, 738), 'numpy.argmax', 'np.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (727, 738), True, 'import numpy as np\n'), ((4192, 4225), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden3', 'keep_prob'], {}), '(hidden3, keep_prob)\n', (4205, 4225), True, 'import tensorflow as tf\n')]
|
import os
import subprocess
import sys
from bs4 import BeautifulSoup
class CloverCoveredLine:
def __init__(self, image_tag, filepath, filename, line_number):
self.image_tag = image_tag
self.filepath = filepath
self.filename = filename
self.line_number = line_number
def __members(self):
return (self.image_tag, self.filepath, self.filename, self.line_number)
def __eq__(self, other):
if type(other) is type(self):
return self.__members() == other.__members()
else:
return False
def to_CSV(self):
return '{},{},{},{}'.format(self.image_tag, self.filepath, self.filename, self.line_number)
def main(argv=None):
argv = argv or sys.argv
reports_dir, is_bugswarm, image_tag = _validate_input(argv)
image_tags = get_image_tag_list(reports_dir) if image_tag is None else [image_tag]
covered_lines = []
for image_tag in image_tags:
img_tag_covered_lines = {}
# Parse the POM.
clover_reports = get_clover_reports(reports_dir, image_tag, is_bugswarm)
if clover_reports is None:
continue
for report in clover_reports:
if report == '' or report is None:
continue
soup = BeautifulSoup(open(report), 'lxml-xml')
# Get all packages for the source and test code
project_packages = soup.project.find_all('package')
testproject_packages = soup.testproject.find_all('package')
            # Iterate through all project packages collecting lines with counts greater than 0
for package in project_packages:
for file in package.find_all('file'):
for line in file.find_all('line'):
line_count = line.get('count')
line_count = int(line.get('count')) if line_count is not None else 0
# if line_count is None:
# continue
# else:
# line_count = int(line_count)
if line_count > 0:
clover_line = CloverCoveredLine(image_tag, file.get('path'), file.get('name'), line.get('num'))
# if clover_line.to_CSV() not in img_tag_covered_lines:
img_tag_covered_lines[clover_line.to_CSV()] = 1
for test_package in testproject_packages:
for file in test_package.find_all('file'):
for line in file.find_all('line'):
line_count = line.get('count')
if line_count is None:
continue
else:
line_count = int(line_count)
if line_count > 0:
clover_line = CloverCoveredLine(image_tag, file.get('path'), file.get('name'), line.get('num'))
# if clover_line.to_CSV() not in img_tag_covered_lines:
img_tag_covered_lines[clover_line.to_CSV()] = 1
covered_lines.extend(list(img_tag_covered_lines.keys()))
with open('clover-covered-lines.csv', 'w+') as file:
for covered_line in covered_lines:
file.write('{}\n'.format(covered_line))
def _run_command(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
stdout = stdout.decode('utf-8').strip()
stderr = stderr.decode('utf-8').strip()
ok = process.returncode == 0
return process, stdout, stderr, ok
def _print_error(msg, stdout=None, stderr=None):
print('Error: ' + msg)
if stdout is not None:
print('stdout:\n{}'.format(stdout))
if stderr is not None:
print('stderr:\n{}'.format(stderr))
def get_clover_reports(reports_dir, image_tag, is_bugswarm):
bs_cmd = 'find {}/{}/failed/targetsite -name "clover.xml"'.format(reports_dir, image_tag)
d4j_cmd = 'find {}/{}/b -name "coverage.xml"'.format(reports_dir, image_tag)
cmd = bs_cmd if is_bugswarm else d4j_cmd
print(cmd)
_, stdout, stderr, ok = _run_command(cmd)
if not ok:
_print_error('Error getting clover-reports', stdout, stderr)
return None
return stdout.split('\n')
def get_image_tag_list(directory):
cmd = 'ls {}'.format(directory)
_, stdout, stderr, ok = _run_command(cmd)
if not ok:
        _print_error('Error getting list of image_tags', stdout, stderr)
image_tags = [x.strip() for x in stdout.split('\n')]
if 'from_host' in image_tags:
image_tags.remove('from_host')
return image_tags
def _print_usage():
    print('Usage: python3 clover_parser.py <reports_dir> <is_bugswarm> [image_tag]')
    print('reports_dir: Path to the directory of reports')
    print('is_bugswarm: "true" for BugSwarm-style reports; any other value uses the d4j-style report layout')
def _validate_input(argv):
if len(argv) != 3 and len(argv) != 4:
_print_usage()
sys.exit(1)
reports_dir = argv[1]
    is_bugswarm = True if argv[2] == 'true' else False
image_tag = argv[3] if len(argv) == 4 else None
    if not (os.path.isdir(reports_dir) and os.path.exists(reports_dir)):
        print('The reports_dir argument is not a directory or does not exist. Exiting.')
_print_usage()
sys.exit(1)
return reports_dir, is_bugswarm, image_tag
if __name__ == '__main__':
sys.exit(main())
|
[
"os.path.isdir",
"subprocess.Popen",
"os.path.exists",
"sys.exit"
] |
[((3409, 3498), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n', (3425, 3498), False, 'import subprocess\n'), ((5010, 5021), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5018, 5021), False, 'import sys\n'), ((5197, 5224), 'os.path.exists', 'os.path.exists', (['reports_dir'], {}), '(reports_dir)\n', (5211, 5224), False, 'import os\n'), ((5341, 5352), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5349, 5352), False, 'import sys\n'), ((5166, 5192), 'os.path.isdir', 'os.path.isdir', (['reports_dir'], {}), '(reports_dir)\n', (5179, 5192), False, 'import os\n')]
|
import os
import random
from contextlib import contextmanager
from typing import Generator
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import config, ops
DIGITS = frozenset(str(i) for i in range(10))
@contextmanager
def tensorflow_random_state(seed: int) -> Generator[None, None, None]:
# Save values
origin_gpu_det = os.environ.get("TF_DETERMINISTIC_OPS", None)
orig_random_state = random.getstate()
orig_np_random_state = np.random.get_state()
if context.executing_eagerly():
tf_random_seed = context.global_seed()
else:
tf_random_seed = ops.get_default_graph().seed
    determinism_enabled = config.is_op_determinism_enabled()
config.enable_op_determinism()
# Set values
os.environ["TF_DETERMINISTIC_OPS"] = "1"
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
yield
# Reset values
if origin_gpu_det is not None:
os.environ["TF_DETERMINISTIC_OPS"] = origin_gpu_det
else:
os.environ.pop("TF_DETERMINISTIC_OPS")
random.setstate(orig_random_state)
np.random.set_state(orig_np_random_state)
tf.random.set_seed(tf_random_seed)
    if not determinism_enabled:
config.disable_op_determinism()
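# Example usage (illustrative sketch, not part of the original module):
#   with tensorflow_random_state(42):
#       ...  # TF, NumPy and `random` operations in this block are reproducible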
|
[
"tensorflow.random.set_seed",
"tensorflow.python.framework.config.is_op_determinism_enabled",
"numpy.random.seed",
"numpy.random.get_state",
"tensorflow.python.framework.config.enable_op_determinism",
"tensorflow.python.eager.context.global_seed",
"tensorflow.python.framework.ops.get_default_graph",
"numpy.random.set_state",
"os.environ.get",
"tensorflow.python.eager.context.executing_eagerly",
"random.seed",
"random.setstate",
"os.environ.pop",
"tensorflow.python.framework.config.disable_op_determinism",
"random.getstate"
] |
[((409, 453), 'os.environ.get', 'os.environ.get', (['"""TF_DETERMINISTIC_OPS"""', 'None'], {}), "('TF_DETERMINISTIC_OPS', None)\n", (423, 453), False, 'import os\n'), ((478, 495), 'random.getstate', 'random.getstate', ([], {}), '()\n', (493, 495), False, 'import random\n'), ((523, 544), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (542, 544), True, 'import numpy as np\n'), ((552, 579), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (577, 579), False, 'from tensorflow.python.eager import context\n'), ((717, 751), 'tensorflow.python.framework.config.is_op_determinism_enabled', 'config.is_op_determinism_enabled', ([], {}), '()\n', (749, 751), False, 'from tensorflow.python.framework import config, ops\n'), ((756, 786), 'tensorflow.python.framework.config.enable_op_determinism', 'config.enable_op_determinism', ([], {}), '()\n', (784, 786), False, 'from tensorflow.python.framework import config, ops\n'), ((854, 871), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (865, 871), False, 'import random\n'), ((876, 896), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (890, 896), True, 'import numpy as np\n'), ((901, 925), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (919, 925), True, 'import tensorflow as tf\n'), ((1113, 1147), 'random.setstate', 'random.setstate', (['orig_random_state'], {}), '(orig_random_state)\n', (1128, 1147), False, 'import random\n'), ((1152, 1193), 'numpy.random.set_state', 'np.random.set_state', (['orig_np_random_state'], {}), '(orig_np_random_state)\n', (1171, 1193), True, 'import numpy as np\n'), ((1198, 1232), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['tf_random_seed'], {}), '(tf_random_seed)\n', (1216, 1232), True, 'import tensorflow as tf\n'), ((606, 627), 'tensorflow.python.eager.context.global_seed', 'context.global_seed', ([], {}), '()\n', (625, 627), False, 'from tensorflow.python.eager import context\n'), ((1070, 1108), 'os.environ.pop', 'os.environ.pop', (['"""TF_DETERMINISTIC_OPS"""'], {}), "('TF_DETERMINISTIC_OPS')\n", (1084, 1108), False, 'import os\n'), ((1271, 1302), 'tensorflow.python.framework.config.disable_op_determinism', 'config.disable_op_determinism', ([], {}), '()\n', (1300, 1302), False, 'from tensorflow.python.framework import config, ops\n'), ((663, 686), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (684, 686), False, 'from tensorflow.python.framework import config, ops\n')]
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import operator
import os
import json
import math
def parseargs():
msg = "Merge translation options"
usage = "merge_translation_option.py [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
parser.add_argument("--input", type=str, required=True, nargs="+",
help="translation options")
parser.add_argument("--output", type=str, help="output path")
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
result = {}
tos = [json.load(open(i, 'r')) for i in args.input]
num_options = len(args.input)
for i in range(len(tos)):
print('option', i, ':', len(tos[i]), 'phrases')
for i in range(len(tos)):
for key in tos[i].keys():
if result.has_key(key):
continue
tmp_options = {}
for j in range(len(tos)):
if tos[j].has_key(key):
for item in tos[j][key]:
if tmp_options.has_key(item[0]):
tmp_options[item[0]] += item[1]
else:
tmp_options[item[0]] = item[1]
tmp_options = [list(k) for k in tmp_options.items()]
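            # average the summed probabilities over the number of merged option files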
tmp_options = [[k[0], k[1]/num_options] for k in tmp_options]
result[key] = tmp_options
if len(result) % 10000 == 0:
print(len(result))
for j in range(len(tos)):
if tos[j].has_key(key):
print(tos[j][key])
print(tmp_options)
print('total:', len(result))
    json.dump(result, open(args.output, 'w'))
|
[
"argparse.ArgumentParser"
] |
[((384, 437), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'msg', 'usage': 'usage'}), '(description=msg, usage=usage)\n', (407, 437), False, 'import argparse\n')]
|
import threading
from os import path
from os.path import dirname
from typing import Optional
from sleuthdeck.deck import Deck
from sleuthdeck.deck import Key
from sleuthdeck.plugins.sleuth import Sleuth
class RepositoryLockKey(Key):
def __init__(self, sleuth: Sleuth, project: str, deployment: Optional[str] = None):
super().__init__()
self.sleuth = sleuth
self.project = project
self.deployment = deployment
self._thread = threading.Thread(target=self._update)
def connect(self, deck: Deck):
image = path.join(dirname(__file__), "lock.jpg")
self.image = image
super().connect(deck)
def _update(self):
# todo: add periodic updates from sleuth to update actions and icon
pass
|
[
"threading.Thread",
"os.path.dirname"
] |
[((471, 508), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._update'}), '(target=self._update)\n', (487, 508), False, 'import threading\n'), ((571, 588), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (578, 588), False, 'from os.path import dirname\n')]
|
# -*- coding: utf-8 -*-
from csv import reader, writer
from pathlib import Path
from random import sample
from typing import Any, List
import requests as requests
from resources.constants import BROWSER_USER_AGENT
def handle_request(url: str) -> Any:
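    # issue a GET with browser-like headers; returns the Response on success, None on any request error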
headers = {'User-Agent': BROWSER_USER_AGENT, 'Upgrade-Insecure-Requests': '1', 'DNT': '1'}
try:
request = requests.get(url, headers=headers)
request.raise_for_status()
return request
except requests.exceptions.HTTPError as http_error:
print(f"Http Error: {http_error}")
except requests.exceptions.ConnectionError as connection_error:
print(f"Error Connecting: {connection_error}")
except requests.exceptions.TooManyRedirects as redirects_error:
print(f"Too Many Redirects: {redirects_error}")
except requests.exceptions.Timeout as timeout_error:
print(f"Timeout Error: {timeout_error}")
except requests.exceptions.RequestException as request_exception:
print(f"Error: {request_exception}")
return None
def select_subsets(language_set: List[str], treebank_set_size: int, sampling_size: int, cache_samples: bool) -> List[List[str]]:
if cache_samples:
cached_samples = load_cached_samples(treebank_set_size, sampling_size)
if cached_samples:
results = cached_samples
else:
samples = generate_samples(language_set, treebank_set_size, sampling_size)
save_samples(samples, treebank_set_size, sampling_size)
results = samples
else:
results = generate_samples(language_set, treebank_set_size, sampling_size)
return results
def load_cached_samples(treebank_set_size: int, sampling_size: int) -> List[List[str]]:
print(f"INFO: Loading {sampling_size} subset(s) of size {treebank_set_size} from disk")
file_path = Path(__file__).absolute()
root_folder = file_path.parent.parent
path_cache_folder = Path(root_folder).joinpath("cache")
cache_file = Path(path_cache_folder, f"{treebank_set_size}-{sampling_size}.csv")
if cache_file.exists():
samples = read_csv_file(cache_file)
return samples
else:
print(f"WARNING: There is no file of {sampling_size} subset(s) of size {treebank_set_size} that has been previously saved")
return []
def read_csv_file(file: Path) -> List[List[str]]:
print(f"INFO: Reading {file.name} file")
samples = []
with open(file, 'rt', encoding="utf-8", newline='') as csv_file:
csv_reader = reader(csv_file)
for row in csv_reader:
samples.append(row)
return samples
def generate_samples(language_set: List[str], treebank_set_size: int, sampling_size: int) -> List[List[str]]:
print(f"INFO: Selecting {sampling_size} subset(s) of size {treebank_set_size}")
results = []
if language_set:
while len(results) < sampling_size:
print(f"Number of subsets selected: {len(results)}/{sampling_size}", end="\r")
result = sample(language_set, k=treebank_set_size)
if result not in results:
results.append(result)
return results
def save_samples(samples: List[List[str]], treebank_set_size: int, sampling_size: int) -> None:
print(f"INFO: Saving {sampling_size} subset(s) of size {treebank_set_size} to disk")
file_path = Path(__file__).absolute()
root_folder = file_path.parent.parent
path_cache_folder = Path(root_folder).joinpath("cache")
path_cache_folder.mkdir(parents=True, exist_ok=True)
file_name = Path(path_cache_folder, f"{treebank_set_size}-{sampling_size}.csv")
with open(file_name, 'wt', encoding="utf-8", newline='') as cache_file:
csv_writer = writer(cache_file, dialect='unix')
csv_writer.writerows(samples)
|
[
"csv.reader",
"csv.writer",
"random.sample",
"pathlib.Path",
"requests.get"
] |
[((2009, 2076), 'pathlib.Path', 'Path', (['path_cache_folder', 'f"""{treebank_set_size}-{sampling_size}.csv"""'], {}), "(path_cache_folder, f'{treebank_set_size}-{sampling_size}.csv')\n", (2013, 2076), False, 'from pathlib import Path\n'), ((3574, 3641), 'pathlib.Path', 'Path', (['path_cache_folder', 'f"""{treebank_set_size}-{sampling_size}.csv"""'], {}), "(path_cache_folder, f'{treebank_set_size}-{sampling_size}.csv')\n", (3578, 3641), False, 'from pathlib import Path\n'), ((379, 413), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (391, 413), True, 'import requests as requests\n'), ((2538, 2554), 'csv.reader', 'reader', (['csv_file'], {}), '(csv_file)\n', (2544, 2554), False, 'from csv import reader, writer\n'), ((3740, 3774), 'csv.writer', 'writer', (['cache_file'], {'dialect': '"""unix"""'}), "(cache_file, dialect='unix')\n", (3746, 3774), False, 'from csv import reader, writer\n'), ((1864, 1878), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1868, 1878), False, 'from pathlib import Path\n'), ((1956, 1973), 'pathlib.Path', 'Path', (['root_folder'], {}), '(root_folder)\n', (1960, 1973), False, 'from pathlib import Path\n'), ((3029, 3070), 'random.sample', 'sample', (['language_set'], {'k': 'treebank_set_size'}), '(language_set, k=treebank_set_size)\n', (3035, 3070), False, 'from random import sample\n'), ((3372, 3386), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3376, 3386), False, 'from pathlib import Path\n'), ((3464, 3481), 'pathlib.Path', 'Path', (['root_folder'], {}), '(root_folder)\n', (3468, 3481), False, 'from pathlib import Path\n')]
|
import logging
def get_logger(module_name: str):
suffix = ""
if module_name != "__main__":
suffix = "." + module_name
# logger = logging.getLogger("serving.model-server" + suffix)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
logger.setLevel("INFO")
return logger
|
[
"logging.getLogger",
"logging.basicConfig"
] |
[((207, 247), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (226, 247), False, 'import logging\n'), ((261, 280), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (278, 280), False, 'import logging\n')]
|
#!/usr/bin/env python3
from typing import Sequence, Union
import pdb
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
class ForwardLoss(Metric):
r"""Loss metric that simply records the loss calculated
in forward pass
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_squared_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
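        # accumulate the loss value already computed in the forward pass and count the updates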
self._sum_of_squared_errors += torch.sum(output).to(self._device)
self._num_examples += 1
@sync_all_reduce("_sum_of_squared_errors", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanSquaredError must have at least one example before it can be computed.")
return self._sum_of_squared_errors.item() / self._num_examples
|
[
"ignite.exceptions.NotComputableError",
"torch.sum",
"torch.tensor",
"ignite.metrics.metric.sync_all_reduce"
] |
[((688, 746), 'ignite.metrics.metric.sync_all_reduce', 'sync_all_reduce', (['"""_sum_of_squared_errors"""', '"""_num_examples"""'], {}), "('_sum_of_squared_errors', '_num_examples')\n", (703, 746), False, 'from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n'), ((419, 457), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self._device'}), '(0.0, device=self._device)\n', (431, 457), False, 'import torch\n'), ((854, 960), 'ignite.exceptions.NotComputableError', 'NotComputableError', (['"""MeanSquaredError must have at least one example before it can be computed."""'], {}), "(\n 'MeanSquaredError must have at least one example before it can be computed.'\n )\n", (872, 960), False, 'from ignite.exceptions import NotComputableError\n'), ((615, 632), 'torch.sum', 'torch.sum', (['output'], {}), '(output)\n', (624, 632), False, 'import torch\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class AMSoftmaxClsHead(ClsHead):
"""AMSoftmax classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
scale (int): scale with normalized cosine scores.
margin (float): margin of AmSoftmax
loss (dict): Config of classification loss.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
scale=30,
margin=0.2,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, )):
super(AMSoftmaxClsHead, self).__init__(loss=loss, topk=topk)
self.in_channels = in_channels
self.num_classes = num_classes
self.s = scale
self.m = margin
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self._init_layers()
def _init_layers(self):
self.W = nn.Parameter(torch.randn(self.num_classes, self.in_channels))
def cosine_sim(self, x1, x2, dim=1, eps=1e-8):
# print(x1, x2)
ip = torch.mm(x1, x2.T)
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return ip / torch.ger(w1, w2).clamp(min=eps)
def init_weights(self):
nn.init.xavier_uniform_(self.W)
def simple_test(self, img):
"""Test without augmentation."""
cls_score = self.s * self.cosine_sim(img, self.W)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
if torch.onnx.is_in_onnx_export():
return pred
pred = list(pred.detach().cpu().numpy())
return pred
def forward_train(self, x, gt_label):
# print(x)
# compute cosine linear
cosine = self.cosine_sim(x, self.W)
# label mapping
one_hot = torch.zeros_like(cosine)
one_hot.scatter_(1, gt_label.view(-1, 1), 1.0)
cls_score = self.s * (cosine - one_hot * self.m)
losses = self.loss(cls_score, gt_label)
return losses
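
# Hedged illustration (hypothetical sizes; mirrors forward_train above without the
# mmcls registry/config machinery): AM-Softmax scales the cosine similarity and
# subtracts the margin only at the ground-truth class before ordinary cross-entropy.
#   feats = F.normalize(torch.randn(4, 512))     # batch of embeddings
#   weight = F.normalize(torch.randn(10, 512))   # 10 class centres
#   labels = torch.randint(0, 10, (4,))
#   cosine = feats @ weight.t()
#   one_hot = torch.zeros_like(cosine).scatter_(1, labels.view(-1, 1), 1.0)
#   logits = 30 * (cosine - one_hot * 0.2)        # s=30, m=0.2
#   loss = F.cross_entropy(logits, labels)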
|
[
"torch.ger",
"torch.zeros_like",
"torch.norm",
"torch.nn.init.xavier_uniform_",
"torch.mm",
"torch.randn",
"torch.nn.functional.softmax",
"torch.onnx.is_in_onnx_export"
] |
[((1405, 1423), 'torch.mm', 'torch.mm', (['x1', 'x2.T'], {}), '(x1, x2.T)\n', (1413, 1423), False, 'import torch\n'), ((1437, 1459), 'torch.norm', 'torch.norm', (['x1', '(2)', 'dim'], {}), '(x1, 2, dim)\n', (1447, 1459), False, 'import torch\n'), ((1473, 1495), 'torch.norm', 'torch.norm', (['x2', '(2)', 'dim'], {}), '(x2, 2, dim)\n', (1483, 1495), False, 'import torch\n'), ((1586, 1617), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.W'], {}), '(self.W)\n', (1609, 1617), True, 'import torch.nn as nn\n'), ((1942, 1972), 'torch.onnx.is_in_onnx_export', 'torch.onnx.is_in_onnx_export', ([], {}), '()\n', (1970, 1972), False, 'import torch\n'), ((2247, 2271), 'torch.zeros_like', 'torch.zeros_like', (['cosine'], {}), '(cosine)\n', (2263, 2271), False, 'import torch\n'), ((1267, 1314), 'torch.randn', 'torch.randn', (['self.num_classes', 'self.in_channels'], {}), '(self.num_classes, self.in_channels)\n', (1278, 1314), False, 'import torch\n'), ((1868, 1895), 'torch.nn.functional.softmax', 'F.softmax', (['cls_score'], {'dim': '(1)'}), '(cls_score, dim=1)\n', (1877, 1895), True, 'import torch.nn.functional as F\n'), ((1516, 1533), 'torch.ger', 'torch.ger', (['w1', 'w2'], {}), '(w1, w2)\n', (1525, 1533), False, 'import torch\n')]
|
#######
#
# classify_images.py
#
# This is a test driver for running our species classifiers and detectors.
# The script classifies one or more hard-coded image files.
#
# Because the inference code has not been assembled into a formal package yet,
# you should define API_ROOT to point to the base of our repo. This
# will be added to your Python path later in the script.
#
# This script has two non-code dependencies:
#
# * a classification model file (and, optionally, a detection model file)
# * a taxonomy file, so the scientific names used in the training data can
# be mapped to common names.
#
# We are currently testing against PyTorch 0.4.1 and Cuda 9.0, and we have tested on
# both Linux and Windows.
#
#######
#%% Constants and imports
import sys
import os
import pandas as pd
# Directory to which you sync'd the repo. Probably the same
# directory this file lives in, but for portability, this file is set up to only
# take dependencies on the repo according to this constant.
API_ROOT = r'd:\git\SpeciesClassification'
# Path to taxa.csv, for latin --> common mapping
#
# Set to None to disable latin --> common mapping
TAXONOMY_PATH = r'd:\temp\taxa.csv' # None
IMAGES_TO_CLASSIFY = [
r"D:\temp\animals\African_Elephant\30651.ngsversion.1421960098780.jpg",
r"D:\temp\animals\Alligator\Alligator_mississippiensis_01.JPG"
]
# CLASSIFICATION_MODEL_PATH = r'd:\temp\models\inc4-incres2-560-78.5\model_deploy.pth.tar'
CLASSIFICATION_MODEL_PATH = r"D:\temp\models\resnext-448-78.8\model_best.pth.tar"
# Detection (i.e., bounding box generation) is optional; set to None
# to disable detection
DETECTION_MODEL_PATH = None
SUBDIRS_TO_IMPORT = ['DetectionClassificationAPI','FasterRCNNDetection','PyTorchClassification']
# This must be True if detection is enabled. Classification can be run
# on the CPU or GPU.
USE_GPU = True
# List of image sizes to use, one per model in the ensemble. Images will be resized
# and reshaped to square images prior to classification.
#
# We typically specify [560,560] if we're loading our Inception/InceptionResnet
# ensemble. For ResNext, we typically specify [448].
#
# IMAGE_SIZES = [560, 560]
IMAGE_SIZES = [448]
#%% Path setup to import the classification code
if (not API_ROOT.lower() in map(str.lower,sys.path)):
print("Adding {} to the python path".format(API_ROOT))
sys.path.insert(0,API_ROOT)
for s in SUBDIRS_TO_IMPORT:
importPath = os.path.join(API_ROOT,s)
print("Adding {} to the python path".format(API_ROOT))
sys.path.insert(0,importPath)
#%% Import classification modules
import api as speciesapi
#%% Build Latin --> common mapping
latinToCommon = {}
if TAXONOMY_PATH != None:
print("Reading taxonomy file")
# Read taxonomy file; takes ~1 minute
df = pd.read_csv(TAXONOMY_PATH)
df = df.fillna('')
# Columns are:
#
# taxonID,scientificName,parentNameUsageID,taxonRank,vernacularName,wikipedia_url
# Create dictionary by ID
nRows = df.shape[0]
for index, row in df.iterrows():
latinName = row['scientificName']
latinName = latinName.strip()
if len(latinName)==0:
print("Warning: invalid scientific name at {}".format(index))
latinName = 'unknown'
commonName = row['vernacularName']
commonName = commonName.strip()
latinName = latinName.lower()
commonName = commonName.lower()
latinToCommon[latinName] = commonName
print("Finished reading taxonomy file")
#%% Define Latin-->common lookup
def doLatinToCommon(latinName):
if len(latinToCommon) == 0:
return latinName
latinName = latinName.lower()
if not latinName in latinToCommon:
print("Warning: latin name {} not in lookup table".format(latinName))
commonName = latinName
else:
commonName = latinToCommon[latinName]
commonName = commonName.strip()
if (len(commonName) == 0):
print("Warning: empty result for latin name {}".format(latinName))
commonName = latinName
return commonName
#%% Create the model(s)
assert os.path.isfile(CLASSIFICATION_MODEL_PATH)
if DETECTION_MODEL_PATH != None:
assert os.path.isfile(DETECTION_MODEL_PATH)
print("Loading model")
model = speciesapi.DetectionClassificationAPI(CLASSIFICATION_MODEL_PATH, DETECTION_MODEL_PATH, IMAGE_SIZES, USE_GPU)
print("Finished loading model")
#%% Classify images
nImages = len(IMAGES_TO_CLASSIFY)
for iImage,imageFileName in enumerate(IMAGES_TO_CLASSIFY):
print("Processing image {} of {}".format(iImage,nImages))
# def predict_image(self, image_path, topK=1, multiCrop=False, predict_mode=PredictMode.classifyUsingDetect):
try:
prediction = model.predict_image(imageFileName, topK=5, multiCrop=False,
predict_mode=speciesapi.PredictMode.classifyOnly)
except Exception as e:
print("Error classifying image {} ({}): {}".format(iImage,imageFileName,str(e)))
continue
fn = os.path.splitext(imageFileName)[0]
for i in range(0, len(prediction.species)):
latinName = prediction.species[i]
likelihood = prediction.species_scores[i]
commonName = doLatinToCommon(latinName)
print('"{}","{}","{}","{}","{}","{}"\n'.format(
iImage,fn,i,latinName,commonName,likelihood))
print("Finished classifying {} images".format(nImages))
|
[
"api.DetectionClassificationAPI",
"pandas.read_csv",
"sys.path.insert",
"os.path.isfile",
"os.path.splitext",
"os.path.join"
] |
[((4215, 4256), 'os.path.isfile', 'os.path.isfile', (['CLASSIFICATION_MODEL_PATH'], {}), '(CLASSIFICATION_MODEL_PATH)\n', (4229, 4256), False, 'import os\n'), ((4370, 4482), 'api.DetectionClassificationAPI', 'speciesapi.DetectionClassificationAPI', (['CLASSIFICATION_MODEL_PATH', 'DETECTION_MODEL_PATH', 'IMAGE_SIZES', 'USE_GPU'], {}), '(CLASSIFICATION_MODEL_PATH,\n DETECTION_MODEL_PATH, IMAGE_SIZES, USE_GPU)\n', (4407, 4482), True, 'import api as speciesapi\n'), ((2391, 2419), 'sys.path.insert', 'sys.path.insert', (['(0)', 'API_ROOT'], {}), '(0, API_ROOT)\n', (2406, 2419), False, 'import sys\n'), ((2852, 2878), 'pandas.read_csv', 'pd.read_csv', (['TAXONOMY_PATH'], {}), '(TAXONOMY_PATH)\n', (2863, 2878), True, 'import pandas as pd\n'), ((4301, 4337), 'os.path.isfile', 'os.path.isfile', (['DETECTION_MODEL_PATH'], {}), '(DETECTION_MODEL_PATH)\n', (4315, 4337), False, 'import os\n'), ((2472, 2497), 'os.path.join', 'os.path.join', (['API_ROOT', 's'], {}), '(API_ROOT, s)\n', (2484, 2497), False, 'import os\n'), ((2572, 2602), 'sys.path.insert', 'sys.path.insert', (['(0)', 'importPath'], {}), '(0, importPath)\n', (2587, 2602), False, 'import sys\n'), ((5143, 5174), 'os.path.splitext', 'os.path.splitext', (['imageFileName'], {}), '(imageFileName)\n', (5159, 5174), False, 'import os\n')]
|
#!/usr/bin/env python3.5
# coding=utf-8
'''
@date = '17/12/1'
@author = 'lynnchan'
@email = '<EMAIL>'
'''
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
from gconfig import *
train_path = Train_Data_Path
test_path = Test_Data_Path
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
if os.path.splitext(root.find('filename').text)[1] == '.jpg':
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
else:
value = (root.find('filename').text+'.jpg',
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def creat_csv():
if type(train_path) !=list:
xml_train = xml_to_csv(train_path)
xml_train.to_csv(train_path+'/'+Train_File_Name+'.csv', index=None)
print('Successfully converted train xml to csv.')
else:
for i in train_path:
xml_train = xml_to_csv(i)
xml_train.to_csv(i + '/' + Train_File_Name + '.csv', index=None)
print('Successfully converted list train xml to csv.')
if type(test_path) != list:
xml_test = xml_to_csv(test_path)
xml_test.to_csv(test_path+'/'+Test_File_Name+'.csv', index=None)
print('Successfully converted test xml to csv.')
else:
for i in test_path:
            xml_test = xml_to_csv(i)
            xml_test.to_csv(i + '/' + Test_File_Name + '.csv', index=None)
            print('Successfully converted list test xml to csv.')
if __name__ == '__main__':
creat_csv()
|
[
"pandas.DataFrame",
"xml.etree.ElementTree.parse",
"glob.glob"
] |
[((345, 371), 'glob.glob', 'glob.glob', (["(path + '/*.xml')"], {}), "(path + '/*.xml')\n", (354, 371), False, 'import glob\n'), ((1607, 1650), 'pandas.DataFrame', 'pd.DataFrame', (['xml_list'], {'columns': 'column_name'}), '(xml_list, columns=column_name)\n', (1619, 1650), True, 'import pandas as pd\n'), ((389, 407), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (397, 407), True, 'import xml.etree.ElementTree as ET\n')]
|
import numpy as np
import math
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, optimizers, serializers, Variable
from chainer import function
from chainer.utils import type_check
from .ops import *
class DCGANGenerator(chainer.Chain):
def __init__(self, latent=128, out_ch=3, base_size=1024, use_bn=True, up_layers=4, upsampling='up_deconv'):
layers = {}
self.up_layers = up_layers
self.base_size = base_size
self.latent = latent
if use_bn:
norm = 'bn'
w = chainer.initializers.Normal(0.02)
else:
norm = None
w = None
base = base_size
layers['c_first'] = NNBlock(latent, 4*4*base, nn='linear', norm=norm, w_init=w)
for i in range(up_layers-1):
layers['c'+str(i)] = NNBlock(base, base//2, nn=upsampling, norm=norm, w_init=w)
base = base//2
layers['c'+str(up_layers-1)] = NNBlock(base, out_ch, nn=upsampling, norm=None, w_init=w, activation=F.tanh)
#print(layers)
super(DCGANGenerator, self).__init__(**layers)
def __call__(self, z, test=False):
h = self.c_first(z, test=test)
h = F.reshape(h, (h.data.shape[0], self.base_size, 4, 4))
for i in range(self.up_layers):
h = getattr(self, 'c'+str(i))(h, test=test)
return h
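
# Hedged usage sketch (assumes the NNBlock layers from .ops build as configured):
#   gen = DCGANGenerator(latent=128, out_ch=3, base_size=1024, up_layers=4)
#   z = gen.xp.random.randn(8, 128).astype('float32')
#   x = gen(Variable(z), test=True)   # -> (8, 3, 64, 64) images squashed to [-1, 1] by tanh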
|
[
"chainer.initializers.Normal",
"chainer.functions.reshape"
] |
[((1230, 1283), 'chainer.functions.reshape', 'F.reshape', (['h', '(h.data.shape[0], self.base_size, 4, 4)'], {}), '(h, (h.data.shape[0], self.base_size, 4, 4))\n', (1239, 1283), True, 'import chainer.functions as F\n'), ((578, 611), 'chainer.initializers.Normal', 'chainer.initializers.Normal', (['(0.02)'], {}), '(0.02)\n', (605, 611), False, 'import chainer\n')]
|
import csv
from pathlib import Path
folder = 'recent_changes'
all_csv = [pth for pth in Path(folder).iterdir()
if pth.suffix == '.csv']
header = None
rows = []
for f_csv in all_csv:
with open(f_csv) as csvfile:
reader = csv.reader(csvfile)
header = next(reader) # read header
rows += list(reader)
with open(f'{folder}_all.csv', 'w', newline='') as csvfile:  # newline='' prevents blank rows on Windows
writer = csv.writer(csvfile)
writer.writerow(header)
writer.writerows(rows)
|
[
"pathlib.Path",
"csv.reader",
"csv.writer"
] |
[((403, 422), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (413, 422), False, 'import csv\n'), ((248, 267), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (258, 267), False, 'import csv\n'), ((90, 102), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (94, 102), False, 'from pathlib import Path\n')]
|
#############################################
# global imports
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import unquote
import sys
import os
import json
#############################################
#############################################
# local imports
from serverutils import process
from serverutils.utils import postjson, ProcessManager
#############################################
FLASK_SERVER_URL = os.environ["FLASK_SERVER_URL"]
SIMPLE_ENGINE_PATH = os.path.join("engines", os.environ["SIMPLE_ENGINE_NAME"])
PROCESS_READ_CALLBACK_URL = FLASK_SERVER_URL + "/read"
#############################################
class SimpleProcessManager(ProcessManager):
def __init__(self, key):
super().__init__(key)
def read_line_callback(self, sline):
postjson(PROCESS_READ_CALLBACK_URL, {
"kind": "procreadline",
"prockey": self.key,
"sline": sline
})
class EngineProcessManager(SimpleProcessManager):
def __init__(self, key):
super().__init__(key)
def popen(self):
return process.PopenProcess(
SIMPLE_ENGINE_PATH,
self.read_line_callback
)
class BotProcessManager(SimpleProcessManager):
def __init__(self, key):
super().__init__(key)
def popen(self):
return process.PopenProcess(
"python",
self.read_line_callback,
proc_args = ["-u", "bot.py"],
ignore_cwd = True
)
processmanagers = {
"engine": EngineProcessManager("engine"),
"bot": BotProcessManager("bot")
}
#############################################
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
global processmanagers
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
message = "! no command"
if len(self.path) > 1:
commandstr = unquote(self.path[1:])
print("commandstr", commandstr)
try:
commandobj = None
commandobj = json.loads(commandstr)
try:
command = commandobj.get("command", None)
key = commandobj.get("key", None)
if command == "r":
message = processmanagers[key].start()
elif command == "s":
message = processmanagers[key].stop()
else:
message = processmanagers[key].send_line(command)
except:
message = "! command error"
except:
message = "! command parse error"
print("status", message)
self.wfile.write(bytes(message, "utf8"))
#############################################
def start_server():
print('starting server...')
server_address = (sys.argv[1], int(sys.argv[2]))
httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
print('running server on address', server_address)
httpd.serve_forever()
#############################################
start_server()
print("server started")
|
[
"urllib.parse.unquote",
"http.server.HTTPServer",
"json.loads",
"serverutils.utils.postjson",
"os.path.join",
"serverutils.process.PopenProcess"
] |
[((502, 559), 'os.path.join', 'os.path.join', (['"""engines"""', "os.environ['SIMPLE_ENGINE_NAME']"], {}), "('engines', os.environ['SIMPLE_ENGINE_NAME'])\n", (514, 559), False, 'import os\n'), ((3080, 3137), 'http.server.HTTPServer', 'HTTPServer', (['server_address', 'testHTTPServer_RequestHandler'], {}), '(server_address, testHTTPServer_RequestHandler)\n', (3090, 3137), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n'), ((816, 918), 'serverutils.utils.postjson', 'postjson', (['PROCESS_READ_CALLBACK_URL', "{'kind': 'procreadline', 'prockey': self.key, 'sline': sline}"], {}), "(PROCESS_READ_CALLBACK_URL, {'kind': 'procreadline', 'prockey':\n self.key, 'sline': sline})\n", (824, 918), False, 'from serverutils.utils import postjson, ProcessManager\n'), ((1108, 1173), 'serverutils.process.PopenProcess', 'process.PopenProcess', (['SIMPLE_ENGINE_PATH', 'self.read_line_callback'], {}), '(SIMPLE_ENGINE_PATH, self.read_line_callback)\n', (1128, 1173), False, 'from serverutils import process\n'), ((1352, 1456), 'serverutils.process.PopenProcess', 'process.PopenProcess', (['"""python"""', 'self.read_line_callback'], {'proc_args': "['-u', 'bot.py']", 'ignore_cwd': '(True)'}), "('python', self.read_line_callback, proc_args=['-u',\n 'bot.py'], ignore_cwd=True)\n", (1372, 1456), False, 'from serverutils import process\n'), ((1997, 2019), 'urllib.parse.unquote', 'unquote', (['self.path[1:]'], {}), '(self.path[1:])\n', (2004, 2019), False, 'from urllib.parse import unquote\n'), ((2156, 2178), 'json.loads', 'json.loads', (['commandstr'], {}), '(commandstr)\n', (2166, 2178), False, 'import json\n')]
|
"""
Search using NASA CMR
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import json
import logging
import requests
import numpy as np
_logger = logging.getLogger(__name__)
from podpac.core.utils import _get_from_url
CMR_URL = r"https://cmr.earthdata.nasa.gov/search/"
def get_collection_entries(session=None, short_name=None, keyword=None, **kwargs):
"""Uses NASA CMR to retrieve metadata about a collection
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
short_name: str, optional
The short name of the dataset
keyword: str, optional
Any keyword search parameters
**kwargs: str, optional
Any additional query parameters
Returns
---------
list:
A list of collection metadata dictionaries
Examples:
-----------
    >>> # This makes the following request https://cmr.earthdata.nasa.gov/search/collections.json?short_name=SPL2SMAP_S
>>> get_collection_id(short_name='SPL2SMAP_S')
['C1522341104-NSIDC_ECS']
"""
base_url = CMR_URL + "collections.json?"
if short_name is not None:
kwargs["short_name"] = short_name
if keyword is not None:
kwargs["keyword"] = keyword
query_string = "&".join([k + "=" + v for k, v in kwargs.items()])
# use generic requests session if `session` is not defined
if session is None:
session = requests
pydict = _get_from_url(base_url + query_string, session).json()
entries = pydict["feed"]["entry"]
return entries
def get_collection_id(session=None, short_name=None, keyword=None, **kwargs):
"""Uses NASA CMR to retrieve collection id
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
short_name: str, optional
The short name of the dataset
keyword: str, optional
Any keyword search parameters
**kwargs: str, optional
Any additional query parameters
Returns
---------
list
A list of collection id's (ideally only one)
Examples:
-----------
    >>> # This makes the following request https://cmr.earthdata.nasa.gov/search/collections.json?short_name=SPL2SMAP_S
>>> get_collection_id(short_name='SPL2SMAP_S')
['C1522341104-NSIDC_ECS']
"""
entries = get_collection_entries(session=session, short_name=short_name, keyword=keyword, **kwargs)
if len(entries) > 1:
_logger.warning("Found more than 1 entry for collection_id search")
collection_id = [e["id"] for e in entries]
return collection_id
def search_granule_json(session=None, entry_map=None, **kwargs):
"""Search for specific files from NASA CMR for a particular collection
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
entry_map: function
A function applied to each individual entry. Could be used to filter out certain data in an entry
**kwargs: dict
Additional query string parameters.
At minimum the provider, provider_id, concept_id, collection_concept_id, short_name, version, or entry_title
need to be provided for a granule search.
Returns
---------
list
Entries for each granule in the collection based on the search terms
"""
base_url = CMR_URL + "granules.json?"
if not np.any(
[
            m in kwargs
for m in [
"provider",
"provider_id",
"concept_id",
"collection_concept_id",
"short_name",
"version",
"entry_title",
]
]
):
raise ValueError(
"Need to provide either"
" provider, provider_id, concept_id, collection_concept_id, short_name, version or entry_title"
" for granule search."
)
if "page_size" not in kwargs:
kwargs["page_size"] = "2000"
if entry_map is None:
entry_map = lambda x: x
query_string = "&".join([k + "=" + str(v) for k, v in kwargs.items()])
if session is None:
session = requests
url = base_url + query_string
if "page_num" not in kwargs:
entries = _get_all_granule_pages(session, url, entry_map)
else:
pydict = _get_from_url(url, session).json()
entries = list(map(entry_map, pydict["feed"]["entry"]))
return entries
def _get_all_granule_pages(session, url, entry_map, max_paging_depth=1000000):
"""Helper function for searching through all pages for a collection.
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
url: str
URL to website
entry_map: function
Function for mapping the entries to a desired format
    max_paging_depth
        Maximum total number of granule entries to page through
"""
page_size = int([q for q in url.split("?")[1].split("&") if "page_size" in q][0].split("=")[1])
max_pages = int(max_paging_depth / page_size)
pydict = _get_from_url(url, session).json()
entries = list(map(entry_map, pydict["feed"]["entry"]))
for i in range(1, max_pages):
page_url = url + "&page_num=%d" % (i + 1)
page_entries = _get_from_url(page_url, session).json()["feed"]["entry"]
if not page_entries:
break
entries.extend(list(map(entry_map, page_entries)))
return entries
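
# Hedged usage sketch (issues live queries against NASA CMR; values mirror the
# docstring example above):
if __name__ == "__main__":
    print(get_collection_id(short_name="SPL2SMAP_S"))
    print(search_granule_json(short_name="SPL2SMAP_S", page_num=1, page_size=10,
                              entry_map=lambda e: e.get("title")))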
|
[
"numpy.any",
"podpac.core.utils._get_from_url",
"logging.getLogger"
] |
[((188, 215), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (205, 215), False, 'import logging\n'), ((3485, 3632), 'numpy.any', 'np.any', (["[(m not in kwargs) for m in ['provider', 'provider_id', 'concept_id',\n 'collection_concept_id', 'short_name', 'version', 'entry_title']]"], {}), "([(m not in kwargs) for m in ['provider', 'provider_id', 'concept_id',\n 'collection_concept_id', 'short_name', 'version', 'entry_title']])\n", (3491, 3632), True, 'import numpy as np\n'), ((1500, 1547), 'podpac.core.utils._get_from_url', '_get_from_url', (['(base_url + query_string)', 'session'], {}), '(base_url + query_string, session)\n', (1513, 1547), False, 'from podpac.core.utils import _get_from_url\n'), ((5155, 5182), 'podpac.core.utils._get_from_url', '_get_from_url', (['url', 'session'], {}), '(url, session)\n', (5168, 5182), False, 'from podpac.core.utils import _get_from_url\n'), ((4439, 4466), 'podpac.core.utils._get_from_url', '_get_from_url', (['url', 'session'], {}), '(url, session)\n', (4452, 4466), False, 'from podpac.core.utils import _get_from_url\n'), ((5358, 5390), 'podpac.core.utils._get_from_url', '_get_from_url', (['page_url', 'session'], {}), '(page_url, session)\n', (5371, 5390), False, 'from podpac.core.utils import _get_from_url\n')]
|
import requests
from bs4 import BeautifulSoup
url = input()
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
print(soup.find("h1").text)
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((67, 84), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (79, 84), False, 'import requests\n'), ((92, 131), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (105, 131), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) <NAME> - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by <NAME> <<EMAIL>>, January 2017
import os
import numpy as np
import pandas as pd
import scipy.io as sio
import utils.datasets as utils
# ---------------------------------------------------------------
# data set paths
__data_path = "{}/data/NMR_40wines.mat".format(os.path.split(__file__)[0])
__pickle_path = "{}/cache/nmr_wine.pickle".format(os.path.split(__file__)[0])
# ---------------------------------------------------------------
# TODO: Add docstring with usage examples (see 'uv_fuel' data set)
@utils.load_data_from_pickle(__pickle_path)
def load_nmr_wines():
"""Loads the NMR Wines data set.
Returns:
A Pandas DataFrame with all the data set info.
Examples:
>>> ds = load_nmr_wines()
>>> ds['wine_data'].shape
(40, 8729)
>>> ds['wine_ints'].shape
(22, 1)
"""
# loading matlab data set object
raw_data = sio.loadmat(__data_path)
# validating loaded data
if raw_data is None:
raise Exception('Error while loading 1H-NMR Wines data.')
# getting features labels
features_labels = raw_data['ppm'][0].tolist()
# getting properties labels
props_labels = list(map(lambda x: x[0], raw_data['Label'][0]))
# getting samples data
data = raw_data['X']
# getting properties data
props_data = raw_data['Y']
# creating the wine data set
all_data = np.hstack([data, props_data])
all_labels = range(all_data.shape[0])
all_features = features_labels + props_labels
wine_ds = utils.build_data_set(all_data.tolist(), all_labels, all_features)
# ----------------------
wine_ints_data = raw_data['wine_ints'][0]
wine_ints_ds = pd.DataFrame(wine_ints_data)
# ----------------------
# the final data set
ds = {
'wine_data': wine_ds,
'wine_ints': wine_ints_ds,
}
# returning the final data set
return ds
|
[
"pandas.DataFrame",
"scipy.io.loadmat",
"numpy.hstack",
"utils.datasets.load_data_from_pickle",
"os.path.split"
] |
[((717, 759), 'utils.datasets.load_data_from_pickle', 'utils.load_data_from_pickle', (['__pickle_path'], {}), '(__pickle_path)\n', (744, 759), True, 'import utils.datasets as utils\n'), ((1102, 1126), 'scipy.io.loadmat', 'sio.loadmat', (['__data_path'], {}), '(__data_path)\n', (1113, 1126), True, 'import scipy.io as sio\n'), ((1593, 1622), 'numpy.hstack', 'np.hstack', (['[data, props_data]'], {}), '([data, props_data])\n', (1602, 1622), True, 'import numpy as np\n'), ((1891, 1919), 'pandas.DataFrame', 'pd.DataFrame', (['wine_ints_data'], {}), '(wine_ints_data)\n', (1903, 1919), True, 'import pandas as pd\n'), ((472, 495), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (485, 495), False, 'import os\n'), ((551, 574), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (564, 574), False, 'import os\n')]
|
import pygame
import flappy
from thread import callback
import speech_recognition as sr
import sys
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[2] == "False":
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source) # we only need to calibrate once, before we start listening
# start listening in the background (note that we don't have to do this inside a `with` statement)
stop_listening = r.listen_in_background(m, callback)
pygame.init() # initialize pygame
pygame.display.set_caption('Flappy Birds For Handicapped People')
flappy.play_game()
|
[
"pygame.init",
"speech_recognition.Microphone",
"flappy.play_game",
"pygame.display.set_caption",
"speech_recognition.Recognizer"
] |
[((545, 558), 'pygame.init', 'pygame.init', ([], {}), '()\n', (556, 558), False, 'import pygame\n'), ((584, 649), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Flappy Birds For Handicapped People"""'], {}), "('Flappy Birds For Handicapped People')\n", (610, 649), False, 'import pygame\n'), ((654, 672), 'flappy.play_game', 'flappy.play_game', ([], {}), '()\n', (670, 672), False, 'import flappy\n'), ((193, 208), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (206, 208), True, 'import speech_recognition as sr\n'), ((221, 236), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (234, 236), True, 'import speech_recognition as sr\n')]
|
'''
This example will print the gesture name
'''
from communitysdk import list_connected_devices, MotionSensorKit
devices = list_connected_devices()
msk_filter = filter(lambda device: isinstance(device, MotionSensorKit), devices)
msk = next(msk_filter, None) # Get first Motion Sensor Kit
if msk == None:
print('No Motion Sensor was found :(')
else:
def on_gesture(gestureValue):
print('Gesture detected:', gestureValue)
try:
msk.set_mode('gesture')
except Exception as e:
print(e)
msk.on_gesture = on_gesture
print('Wave your hand above the Motion Sensor:')
|
[
"communitysdk.list_connected_devices"
] |
[((126, 150), 'communitysdk.list_connected_devices', 'list_connected_devices', ([], {}), '()\n', (148, 150), False, 'from communitysdk import list_connected_devices, MotionSensorKit\n')]
|
# encoding: utf-8
from zope.interface import implements
from zope.schema.vocabulary import SimpleVocabulary
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from plone.memoize.instance import memoize
class TestingVocabulary(object):
implements(IVocabularyFactory)
@memoize
def __call__(self, context):
""" """
res = []
res.append(SimpleTerm('existing_key1', 'existing_key1', 'Existing v\xc3\xa9lue 1'))
res.append(SimpleTerm('existing_key2', 'existing_key2', 'Existing v\xc3\xa9lue 2'))
res.append(SimpleTerm('existing_key3', 'existing_key3', 'Existing v\xc3\xa9lue 3'))
return SimpleVocabulary(res)
TestingVocabularyFactory = TestingVocabulary()
class TestingFullVocabulary(object):
implements(IVocabularyFactory)
@memoize
def __call__(self, context):
""" """
res = []
res.append(SimpleTerm('existing_key1', 'existing_key1', 'Full existing value 1'))
res.append(SimpleTerm('existing_key2', 'existing_key2', 'Full existing value 2'))
res.append(SimpleTerm('existing_key3', 'existing_key3', 'Full existing value 3'))
return SimpleVocabulary(res)
TestingFullVocabularyFactory = TestingFullVocabulary()
|
[
"zope.schema.vocabulary.SimpleVocabulary",
"zope.schema.vocabulary.SimpleTerm",
"zope.interface.implements"
] |
[((292, 322), 'zope.interface.implements', 'implements', (['IVocabularyFactory'], {}), '(IVocabularyFactory)\n', (302, 322), False, 'from zope.interface import implements\n'), ((807, 837), 'zope.interface.implements', 'implements', (['IVocabularyFactory'], {}), '(IVocabularyFactory)\n', (817, 837), False, 'from zope.interface import implements\n'), ((694, 715), 'zope.schema.vocabulary.SimpleVocabulary', 'SimpleVocabulary', (['res'], {}), '(res)\n', (710, 715), False, 'from zope.schema.vocabulary import SimpleVocabulary\n'), ((1203, 1224), 'zope.schema.vocabulary.SimpleVocabulary', 'SimpleVocabulary', (['res'], {}), '(res)\n', (1219, 1224), False, 'from zope.schema.vocabulary import SimpleVocabulary\n'), ((422, 487), 'zope.schema.vocabulary.SimpleTerm', 'SimpleTerm', (['"""existing_key1"""', '"""existing_key1"""', '"""Existing vélue 1"""'], {}), "('existing_key1', 'existing_key1', 'Existing vélue 1')\n", (432, 487), False, 'from zope.schema.vocabulary import SimpleTerm\n'), ((514, 579), 'zope.schema.vocabulary.SimpleTerm', 'SimpleTerm', (['"""existing_key2"""', '"""existing_key2"""', '"""Existing vélue 2"""'], {}), "('existing_key2', 'existing_key2', 'Existing vélue 2')\n", (524, 579), False, 'from zope.schema.vocabulary import SimpleTerm\n'), ((606, 671), 'zope.schema.vocabulary.SimpleTerm', 'SimpleTerm', (['"""existing_key3"""', '"""existing_key3"""', '"""Existing vélue 3"""'], {}), "('existing_key3', 'existing_key3', 'Existing vélue 3')\n", (616, 671), False, 'from zope.schema.vocabulary import SimpleTerm\n'), ((937, 1006), 'zope.schema.vocabulary.SimpleTerm', 'SimpleTerm', (['"""existing_key1"""', '"""existing_key1"""', '"""Full existing value 1"""'], {}), "('existing_key1', 'existing_key1', 'Full existing value 1')\n", (947, 1006), False, 'from zope.schema.vocabulary import SimpleTerm\n'), ((1027, 1096), 'zope.schema.vocabulary.SimpleTerm', 'SimpleTerm', (['"""existing_key2"""', '"""existing_key2"""', '"""Full existing value 2"""'], {}), "('existing_key2', 'existing_key2', 'Full existing value 2')\n", (1037, 1096), False, 'from zope.schema.vocabulary import SimpleTerm\n'), ((1117, 1186), 'zope.schema.vocabulary.SimpleTerm', 'SimpleTerm', (['"""existing_key3"""', '"""existing_key3"""', '"""Full existing value 3"""'], {}), "('existing_key3', 'existing_key3', 'Full existing value 3')\n", (1127, 1186), False, 'from zope.schema.vocabulary import SimpleTerm\n')]
|
#!/usr/bin/env python3
import numpy as np
import pickle
from PIL import Image
w = pickle.load(open("weights1000.pkl", "rb"))
def Classify(example):
return w.dot(example)
#Seems to get 2, 3, 4 correct...
for i in range(0, 5):
image = Image.open("test_images/{}.jpg".format(i)).convert("L")
x = np.asarray(image.getdata())
x = (255 - x)/255
x = np.r_[x, 1]
y = Classify(x)
print(y)
print("Actual: {} Classification: {}".format(i, np.argmax(y)))
|
[
"numpy.argmax"
] |
[((464, 476), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (473, 476), True, 'import numpy as np\n')]
|
import unittest
from awacs import s3, ec2, iam
from awacs.aws import PolicyDocument, Statement, Action, Condition
from awacs.aws import StringEquals, StringLike
class TestEquality(unittest.TestCase):
def test_condition_equality(self):
self.assertEqualWithHash(
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])),
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])))
self.assertNotEqualWithHash(
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])),
Condition(StringLike("s3:prefix", ["other/${aws:username}/*"])))
self.assertNotEqualWithHash(
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])),
Condition(StringEquals("s3:prefix", ["home/${aws:username}/*"])))
def test_arn_equality(self):
self.assertEqualWithHash(
s3.ARN("myBucket"), s3.ARN("myBucket"))
self.assertNotEqualWithHash(
s3.ARN("myBucket"), s3.ARN("myOtherBucket"))
self.assertEqualWithHash(
ec2.ARN("some-resource", "some-region", "some-account"),
ec2.ARN("some-resource", "some-region", "some-account"))
self.assertNotEqualWithHash(
ec2.ARN("some-resource", "some-region", "some-account"),
ec2.ARN("some-resource", "some-other-region", "some-account"))
self.assertNotEqualWithHash(
ec2.ARN("some-resource", "some-region", "some-account"),
iam.ARN("some-resource", "some-region", "some-account"))
def test_action_equality(self):
self.assertEqualWithHash(
Action('autoscaling', 'DescribeLaunchConfigurations'),
Action('autoscaling', 'DescribeLaunchConfigurations'))
self.assertNotEqualWithHash(
Action('autoscaling', 'DescribeLaunchConfigurations'),
Action('ec2', 'DescribeInstances'))
def test_statement_equality(self):
one = Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
one_again = Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
two = Statement(
Effect="Allow",
Action=[
Action('ec2', 'DescribeInstances'),
],
Resource=["*"]
)
self.assertEqualWithHash(one, one_again)
self.assertNotEqualWithHash(one, two)
def test_policy_document_equality(self):
one = PolicyDocument(
Version="2012-10-17",
Statement=[
Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
]
)
one_again = PolicyDocument(
Version="2012-10-17",
Statement=[
Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
]
)
two = PolicyDocument(
Version="2012-10-17",
Statement=[
Statement(
Effect="Allow",
Action=[
Action('ec2', 'DescribeInstances'),
],
Resource=["*"]
)
]
)
self.assertEqualWithHash(one, one_again)
self.assertNotEqualWithHash(one, two)
def assertEqualWithHash(self, one, two):
self.assertTrue(one == two)
self.assertEqual(hash(one), hash(two))
def assertNotEqualWithHash(self, one, two):
self.assertTrue(one != two)
self.assertNotEqual(hash(one), hash(two))
|
[
"awacs.s3.ARN",
"awacs.aws.StringEquals",
"awacs.aws.Action",
"awacs.aws.StringLike",
"awacs.ec2.ARN",
"awacs.iam.ARN"
] |
[((891, 909), 'awacs.s3.ARN', 's3.ARN', (['"""myBucket"""'], {}), "('myBucket')\n", (897, 909), False, 'from awacs import s3, ec2, iam\n'), ((911, 929), 'awacs.s3.ARN', 's3.ARN', (['"""myBucket"""'], {}), "('myBucket')\n", (917, 929), False, 'from awacs import s3, ec2, iam\n'), ((981, 999), 'awacs.s3.ARN', 's3.ARN', (['"""myBucket"""'], {}), "('myBucket')\n", (987, 999), False, 'from awacs import s3, ec2, iam\n'), ((1001, 1024), 'awacs.s3.ARN', 's3.ARN', (['"""myOtherBucket"""'], {}), "('myOtherBucket')\n", (1007, 1024), False, 'from awacs import s3, ec2, iam\n'), ((1073, 1128), 'awacs.ec2.ARN', 'ec2.ARN', (['"""some-resource"""', '"""some-region"""', '"""some-account"""'], {}), "('some-resource', 'some-region', 'some-account')\n", (1080, 1128), False, 'from awacs import s3, ec2, iam\n'), ((1142, 1197), 'awacs.ec2.ARN', 'ec2.ARN', (['"""some-resource"""', '"""some-region"""', '"""some-account"""'], {}), "('some-resource', 'some-region', 'some-account')\n", (1149, 1197), False, 'from awacs import s3, ec2, iam\n'), ((1249, 1304), 'awacs.ec2.ARN', 'ec2.ARN', (['"""some-resource"""', '"""some-region"""', '"""some-account"""'], {}), "('some-resource', 'some-region', 'some-account')\n", (1256, 1304), False, 'from awacs import s3, ec2, iam\n'), ((1318, 1379), 'awacs.ec2.ARN', 'ec2.ARN', (['"""some-resource"""', '"""some-other-region"""', '"""some-account"""'], {}), "('some-resource', 'some-other-region', 'some-account')\n", (1325, 1379), False, 'from awacs import s3, ec2, iam\n'), ((1431, 1486), 'awacs.ec2.ARN', 'ec2.ARN', (['"""some-resource"""', '"""some-region"""', '"""some-account"""'], {}), "('some-resource', 'some-region', 'some-account')\n", (1438, 1486), False, 'from awacs import s3, ec2, iam\n'), ((1500, 1555), 'awacs.iam.ARN', 'iam.ARN', (['"""some-resource"""', '"""some-region"""', '"""some-account"""'], {}), "('some-resource', 'some-region', 'some-account')\n", (1507, 1555), False, 'from awacs import s3, ec2, iam\n'), ((1640, 1693), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (1646, 1693), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((1707, 1760), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (1713, 1760), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((1812, 1865), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (1818, 1865), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((1879, 1913), 'awacs.aws.Action', 'Action', (['"""ec2"""', '"""DescribeInstances"""'], {}), "('ec2', 'DescribeInstances')\n", (1885, 1913), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((298, 349), 'awacs.aws.StringLike', 'StringLike', (['"""s3:prefix"""', "['home/${aws:username}/*']"], {}), "('s3:prefix', ['home/${aws:username}/*'])\n", (308, 349), False, 'from awacs.aws import StringEquals, StringLike\n'), ((374, 425), 'awacs.aws.StringLike', 'StringLike', (['"""s3:prefix"""', "['home/${aws:username}/*']"], {}), "('s3:prefix', ['home/${aws:username}/*'])\n", (384, 425), False, 'from awacs.aws import StringEquals, StringLike\n'), ((488, 539), 'awacs.aws.StringLike', 'StringLike', (['"""s3:prefix"""', "['home/${aws:username}/*']"], {}), "('s3:prefix', 
['home/${aws:username}/*'])\n", (498, 539), False, 'from awacs.aws import StringEquals, StringLike\n'), ((564, 616), 'awacs.aws.StringLike', 'StringLike', (['"""s3:prefix"""', "['other/${aws:username}/*']"], {}), "('s3:prefix', ['other/${aws:username}/*'])\n", (574, 616), False, 'from awacs.aws import StringEquals, StringLike\n'), ((679, 730), 'awacs.aws.StringLike', 'StringLike', (['"""s3:prefix"""', "['home/${aws:username}/*']"], {}), "('s3:prefix', ['home/${aws:username}/*'])\n", (689, 730), False, 'from awacs.aws import StringEquals, StringLike\n'), ((755, 808), 'awacs.aws.StringEquals', 'StringEquals', (['"""s3:prefix"""', "['home/${aws:username}/*']"], {}), "('s3:prefix', ['home/${aws:username}/*'])\n", (767, 808), False, 'from awacs.aws import StringEquals, StringLike\n'), ((2045, 2098), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (2051, 2098), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((2248, 2301), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (2254, 2301), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((2445, 2479), 'awacs.aws.Action', 'Action', (['"""ec2"""', '"""DescribeInstances"""'], {}), "('ec2', 'DescribeInstances')\n", (2451, 2479), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((2879, 2932), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (2885, 2932), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((3244, 3297), 'awacs.aws.Action', 'Action', (['"""autoscaling"""', '"""DescribeLaunchConfigurations"""'], {}), "('autoscaling', 'DescribeLaunchConfigurations')\n", (3250, 3297), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n'), ((3604, 3638), 'awacs.aws.Action', 'Action', (['"""ec2"""', '"""DescribeInstances"""'], {}), "('ec2', 'DescribeInstances')\n", (3610, 3638), False, 'from awacs.aws import PolicyDocument, Statement, Action, Condition\n')]
|
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Description: modify entrance
"""
import datetime
import json
import re
import requests
from retrying import retry
from javcra.api.gitee_api import Issue
from javcra.common.constant import REPO_BASE_URL, RELEASE_URL
from javcra.libs.log import logger
from javcra.libs.read_excel import download_file
class Operation(Issue):
"""
md operation for release issue description
"""
def init_md_table(self, t_head=None, body_info=None, block_title="", prefix="", suffix=""):
"""
initialize the md table of specific part like "CVE part" for release issue
Args:
t_head: table head. e.g.["CVE", "仓库", "status"]
body_info: table body
block_title: title of block. e.g: "## 1.CVE"
prefix: table prefix. e.g.: "修复cve xx 个"
suffix: characters between the end of the table and the next block.
Raises:
ValueError: The thead must be a list or tuple
Returns:
str: markdown table str
"""
if not t_head:
t_head = []
if not isinstance(t_head, (list, tuple)):
raise ValueError("The thead must be a list or tuple.")
thead_str = "|" + "|".join(t_head) + "|\n" + "|-" * len(t_head) + "|\n"
tbody_str = self.convert_md_table_format(t_head, body_info)
table_str = thead_str + tbody_str
if prefix:
table_str = prefix + "\n" + table_str
return "\n".join([block_title, table_str, suffix])
@staticmethod
def convert_md_table_format(table_head, issue_info):
"""
get markdown table body according to table_head and issue_info
Args:
table_head: table head like ["issue","status",...]
issue_info: issue info like [{"issue":...,"status":...},....]
Returns:
markdown table str
"""
if not issue_info:
issue_info = []
table_body_str = ""
for info in issue_info:
table_body_str += "|"
for word in table_head:
table_body_str += str(info.get(word)) + "|"
table_body_str += "\n"
return table_body_str
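
    # Illustrative sketch of the markdown produced by the two helpers above
    # (hypothetical head/body values, not taken from a real release issue):
    #   calling init_md_table(["CVE", "仓库", "status"],
    #                         [{"CVE": "#I41R53", "仓库": "krb5", "status": "已完成"}],
    #                         block_title="## 1、CVE", prefix="修复CVE 1个")
    # on an Operation instance returns:
    #   ## 1、CVE
    #   修复CVE 1个
    #   |CVE|仓库|status|
    #   |-|-|-|
    #   |#I41R53|krb5|已完成|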
@staticmethod
def get_block_lines(issue_body_lines, start_flag, end_flag):
"""
get block lines of specific part from issue body lines
Args:
issue_body_lines: the lines of issue body
            start_flag: start flag of specific part, like "## 1、CVE"
end_flag: end flag of specific part, like "\n"
Returns: block_lines: lines in specific part like "cve part"
block_start_idx: start index of specific part
block_end_idx: end index of specific part
"""
block_start_idx = 0
block_end_idx = 0
flag = 0
# get block lines
for idx, line in enumerate(issue_body_lines):
if not flag and line.startswith(start_flag):
# represents the start of block
flag = 1
block_start_idx = idx
continue
if flag and line == end_flag:
block_end_idx = idx
break
return issue_body_lines[block_start_idx:block_end_idx], block_start_idx, block_end_idx
@staticmethod
def modify_block_lines(origin_lines, block_lines, block_start, block_end):
"""
modify block lines for add or delete operation
Args:
origin_lines: list, issue body splitlines
block_lines: list, block str splitlines
block_start: start index of block
block_end: end index of block
Returns:
new lines for issue body, list
"""
# to get count and then modify str "修复CVE xxx个"
fix_line_idx = -1
count = 0
for index, cur_line in enumerate(block_lines):
# demo: 修复CVE xxx个
if cur_line.startswith("修复"):
fix_line_idx = index
# demo: |#I41R53:CVE-2021-36222|krb5|
if cur_line.startswith("|#"):
count += 1
if fix_line_idx != -1:
block_lines[fix_line_idx] = re.sub(
"\d+", str(count), block_lines[fix_line_idx]
)
# modify block lines
origin_lines[block_start:block_end] = block_lines
return origin_lines
@staticmethod
def __append_info_in_specific_block(append_info, block_lines):
"""
append info in specific block for add operation
Args:
append_info: issue info or requires info, dict
block_lines: lines of specific block
Returns:
block_lines: block lines after append
"""
for key, value in append_info.items():
# if the issue to be added is already in the table, then continue
if any([key in line for line in block_lines]):
logger.info("issue {} already exists in body content.".format(key))
continue
# if the requires info to be added already in the table, then not add
value_lines = value.splitlines(keepends=True)
append_value_lines = []
for line in value_lines:
if line not in block_lines:
append_value_lines.append(line)
value = "".join(append_value_lines)
block_lines.append(value)
return block_lines
@staticmethod
def __delete_issue_in_specific_block(delete_issue, block_lines):
"""
delete issue in specific block for delete operation
Args:
block_lines: lines of specific block
delete_issue: issue to delete
Returns:
block_lines: block lines after delete
"""
to_remove_idx = -1
for idx, block_line in enumerate(block_lines):
if delete_issue in block_line:
to_remove_idx = idx
break
if to_remove_idx != -1:
block_lines.pop(to_remove_idx)
else:
logger.info("The issue {} does not exist in release issue description."
"".format(delete_issue))
return block_lines
@staticmethod
def __update_info_in_specific_block(update_info, block_lines):
"""
update info in specific block for update operation
Args:
update_info: issue to update
block_lines: lines of specific block
Returns:
block_lines: new lines of specific block
"""
for issue_id, issue_content in update_info.items():
if not issue_content:
continue
for idx, ln in enumerate(block_lines):
if issue_id in ln:
block_lines[idx] = issue_content
break
return block_lines
def get_new_body_lines(self, old_issue_info, append_info=None, delete_info=None,
update_info=None, start_flag="", end_flag="\n"):
"""
generating a new issue body by add or delete or update operation
Args:
old_issue_info: old issue info
append_info: issues to add. like {issue_id:{"repo":..,"status":...},...}
delete_info: issues to delete.
update_info: issues to update.
start_flag: start flag of block
end_flag: end flag of block.
Raises:
ValueError:
                at least one of append_info, delete_info or update_info must be provided
Returns:
new body lines
"""
if not any((append_info, delete_info, update_info)):
raise ValueError("append_info or delete_info or update info need at least one")
issue_body_lines = old_issue_info.splitlines(keepends=True)
block_lines, block_start_idx, block_end_idx = self.get_block_lines(
issue_body_lines, start_flag, end_flag)
if append_info:
block_lines = self.__append_info_in_specific_block(append_info, block_lines)
elif delete_info:
block_lines = self.__delete_issue_in_specific_block(delete_info, block_lines)
else:
block_lines = self.__update_info_in_specific_block(update_info, block_lines)
final_lines = self.modify_block_lines(issue_body_lines, block_lines, block_start_idx,
block_end_idx)
return "".join(final_lines)
def create_jenkins_comment(self, jenkins_result):
"""method to create issue comment
Args:
jenkins_result: jenkins result
Returns:
comment_res: Success and failure in creating a comment
"""
for result in jenkins_result:
if not result.get("status"):
logger.error("failed to obtain jenkins_result")
return
th = ["name", "status", "output"]
comment = self.init_md_table(th, jenkins_result)
comment_res = self.create_issue_comment(comment)
if not comment_res:
logger.error("Failed to create Jenkins' comment message %s" % comment)
return
return comment_res
def add_for_specific_block(self, body_str, issues, table_head, block_name):
"""
add info in specific block
Args:
body_str: str, issue body
issues: issues to be add
table_head: list, table head
block_name: block name
Returns:
processed issue body str
"""
if not body_str:
raise ValueError("no content of release issue body, failed to add.")
issues_dict = dict()
issues_info_list = list()
# If the block is "requires", then get the md format str directly, like "|bluez|接口变更|"
if "requires" in block_name:
requires_md_str = self.convert_md_table_format(table_head, issues)
if requires_md_str:
issues_info_list.append(requires_md_str)
issues_dict = {"requires_str": requires_md_str}
else:
# for other blocks, get detail issue info according to each issue id, then get the md format str
# like "|#I41R53:CVE-2021-36222|krb5|已完成|7.5|1.18.2|否|"
for issue_id in issues:
single_issue_info = self.get_single_issue_info(issue_id, block_name)
if single_issue_info:
issues_info_list.append(single_issue_info)
issue_info = self.convert_md_table_format(table_head, single_issue_info)
issues_dict.setdefault(issue_id, issue_info)
# if all the info to be add are empty
if not issues_info_list:
raise ValueError("failed to add, please check whether the issues to be added exists.")
return self.get_new_body_lines(
body_str, append_info=issues_dict, start_flag=block_name, end_flag="\n"
)
def delete_for_specific_block(self, body_str, issues, block_name):
"""
delete info in specific block
Args:
body_str: str, issue body
issues: issues to be delete
block_name:block name
Returns:
processed issue body str
"""
if not body_str:
raise ValueError("no content of release issue body, failed to delete.")
res_str = body_str
# delete each issue and then get new issue body lines
for issue_id in issues:
res_str = self.get_new_body_lines(
res_str, delete_info=issue_id, start_flag=block_name, end_flag="\n"
)
return res_str
@staticmethod
def __get_score(body_str):
"""
get the score of cve
Args:
body_str: cve issue body str
Returns:
str: score value or no score
"""
# to match openEuler评分 for cve
euler_score_pattern = re.compile("openEuler评分.*?(?P<euler_score>[0-9\.]+)", flags=re.S)
euler_res = euler_score_pattern.search(body_str)
if euler_res:
return euler_res["euler_score"]
else:
# to match BaseScore for cve
base_score_pattern = re.compile("BaseScore[::](?P<base_score>[0-9\.]+)")
base_score = base_score_pattern.search(body_str)
return base_score["base_score"] if base_score else "no score info"
def __is_abi_change(self, body_str):
"""
Parsing whether the abi has changed
Args:
body_str: cve issue body
Returns:
"是" or "否"
"""
# to match whether the abi has changed of specific branch
abi_content_pattern = re.compile("修复是否涉及abi变化.*?(?P<abi>.*)[\\n$]", flags=re.S)
abi_res = abi_content_pattern.search(body_str)
if not abi_res:
logger.error("The abi pattern did not match the info")
return "否"
abi_info = abi_res["abi"]
branch = self.get_update_issue_branch()
if not branch:
return "否"
for line in abi_info.splitlines():
if branch in line and "是" in line:
return "是"
return "否"
def get_single_issue_info(self, issue_id, block_name):
"""
        get single issue info for specific block
Args:
block_name: name of block
issue_id: issue id
Returns:
list: issue info list
"""
issue_content = self.get_issue_info(issue_number=issue_id)
if not issue_content:
logger.error("can not get the content of issue {}, perhaps this issue does not exist.".format(issue_id))
return []
repository = issue_content.get("repository", {})
# for all the block, get the dict of repository and status for the issue
issue_info = {
"仓库": repository.get("name", "无仓库信息"),
"status": issue_content.get("issue_state", "无状态信息")
}
block_names_list = ["## 2、bugfix", "# 3、安装、自编译问题", "# 4、遗留问题"]
if block_name in block_names_list:
issue_info["issue"] = "#" + issue_id
if "遗留" in block_name:
issue_info["type"] = issue_content.get("issue_type", "无type信息")
issue_info["status"] = "遗留"
elif "CVE" in block_name:
issue_body = self.get_issue_body(issue_id)
if not issue_body:
logger.error("empty issue body for {}, can not get the info for {} block.".format(issue_id, block_name))
return []
version_pattern = re.compile("漏洞归属的版本[::](?P<version>.*)")
version = version_pattern.search(issue_body)
issue_info["CVE"] = "#" + issue_id
issue_info["score"] = self.__get_score(issue_body)
issue_info["version"] = version["version"] if version else "no version info"
issue_info["abi是否变化"] = self.__is_abi_change(issue_body)
return [issue_info]
def update_for_specific_block(self, body_str, issues, table_head, block_name):
"""
        Update the rows of a specific table block
Args:
body_str: body info
issues: list of issue numbers
table_head: table head
block_name: block name
Returns:
get_new_body_lines: The new issue of body
"""
if not body_str:
raise ValueError("no content of release issue, failed to update")
to_update = {}
for issue_id in issues:
# latest issue status
single_issue_info = self.get_single_issue_info(issue_id, block_name)
to_update.setdefault(
issue_id, self.convert_md_table_format(table_head, single_issue_info)
)
return self.get_new_body_lines(
body_str, update_info=to_update, start_flag=block_name, end_flag="\n"
)
def operate_for_specific_block(self, table_head, block_name, table_body=None, prefix="", operate="init",
body_str=None, issues=None):
"""
Process init, add, delete operations for specific block
Args:
table_head: list, table head
            block_name: str, block name like "## 1、CVE"
table_body: table_body of specific part for init, like [{..},{..},..].
prefix: prefix of block, like "修复了bugfix xxx个"
operate: init, add, delete
body_str: issue body, str
issues: issue id, list
Raises:
ValueError: not allowed operate
Returns:
processed release issue body str
"""
if not table_body:
table_body = []
if operate == "init":
return self.init_md_table(table_head, table_body, block_name, prefix)
elif operate == "add":
return self.add_for_specific_block(body_str, issues, table_head, block_name)
elif operate == "delete":
return self.delete_for_specific_block(body_str, issues, block_name)
elif operate == "update":
return self.update_for_specific_block(body_str, issues, table_head, block_name)
else:
raise ValueError(
"not allowed 'operate' value,expected in ['init','add','delete','update'],but given {}".format(operate)
)
def init(self, *args):
"""
init specific block
Returns:
init str
"""
return self.get_new_issue_body(operate="init", *args)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
raise NotImplementedError
class CveIssue(Operation):
"""
operation CVE in issue
"""
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def create_cve_list(self, user_email):
"""
The CVE-Manager is triggered to generate the CVE list and archive it
Args:
user_email (str): gitee user email
"""
# Take cve within three months
start_time = (datetime.datetime.now() + datetime.timedelta(days=-90)).strftime('%Y-%m-%d')
email_name = user_email.split('@')[0]
url = "https://api.openeuler.org/cve-manager/v1/download/excel/triggerCveData?startTime=" + \
start_time + "&typeName=" + email_name
try:
response = requests.get(url, headers=self.headers)
if response.status_code == 200 and "a task being processed" in response.text:
logger.info("The CVE-Manager is triggered to generate the CVE list and archive the CVE list")
return True
logger.error("The CVE List file fails to be archived,"
"The response status code is %s,"
"the response body is %s" % (response.status_code, response.text))
return False
except (requests.RequestException, AttributeError) as error:
logger.error("The CVE List file fails to be archived because %s " % error)
return False
def get_cve_list(self, *args):
"""
        Obtain CVE-related information provided by the CVE-Manager.
Returns:
cve_list: Data in Excel in dictionary form
"""
user_email, obs_ak, obs_sk = args
# trigger cve_manger to archive
resp = self.create_cve_list(user_email)
if not resp:
raise ValueError("trigger cve-manege archive failure")
@retry(stop_max_attempt_number=5, wait_fixed=60000)
def get_list():
"""
Get archived files
Returns:
cve_list: document content
"""
now_time = datetime.date(
datetime.date.today().year,
datetime.date.today().month,
datetime.date.today().day,
).strftime("%Y-%m-%d")
branch_name = self.get_update_issue_branch()
if not branch_name:
logger.error("Failed to obtain branch")
return []
cve_list = download_file(
now_time, "{}_updateinfo.xlsx".format(branch_name), obs_ak, obs_sk
)
if not cve_list:
logger.error("Failed to obtain CVE data")
raise ValueError("Failed to obtain CVE data")
return cve_list
cve_list = get_list()
return cve_list
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for cve block operation
Args:
operate: operate str. Defaults to "init".expected [init,add,delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
new issue body str
"""
if not issues:
issues = []
t_head = ["CVE", "仓库", "status", "score", "version", "abi是否变化"]
block_name = "## 1、CVE"
logger.info("Start to obtain cve archive information, it may take a few minutes.")
cve_list = [] if operate != "init" else self.get_cve_list(*args)
cve_prefix = "修复CVE {}个".format(len(cve_list))
return self.operate_for_specific_block(t_head, block_name, prefix=cve_prefix, operate=operate,
table_body=cve_list, body_str=body_str, issues=issues)
class BugFixIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for bugfix block operation
Args:
operate: operate str. Defaults to "init".expected [init,add,delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
str: new issue body str
"""
if not issues:
issues = []
table_head = ["issue", "仓库", "status"]
block_name = "## 2、bugfix"
bugfix_list = []
bugfix_prefix = "修复bugfix {}个".format(len(bugfix_list))
return self.operate_for_specific_block(
table_head,
block_name,
prefix=bugfix_prefix,
operate=operate,
table_body=bugfix_list,
body_str=body_str,
issues=issues,
)
class RequiresIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
@staticmethod
def get_requires_list():
"""
get requires list
Returns:
requires list, like [{"仓库":..., "引入原因":...},...]
"""
        # since the code that generates pkg requires is not in this repository,
        # the return value is assumed to be []
return []
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for requires block operation
Args:
operate. Defaults to "init".expected [init,add,delete]
body_str: gitee issue body str.
issues: issue list
Returns:
new issue body str
"""
t_head = ["仓库", "引入原因"]
block_name = "## 3、requires"
if operate not in ["init", "add"]:
raise ValueError("requires block operation only allowed in ['init', 'add'].")
issues = self.get_requires_list()
return self.operate_for_specific_block(
t_head, block_name, operate=operate, body_str=body_str, issues=issues
)
class InstallBuildIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for install build block operation
Args:
            operate: operate str. Expected in [init, add, delete].
body_str: gitee issue body str.
issues: issue id list.
Returns:
new issue body str
"""
table_head = ["issue", "仓库", "status"]
block_name = "# 3、安装、自编译问题"
return self.operate_for_specific_block(
table_head,
block_name,
operate=operate,
body_str=body_str,
issues=issues
)
class RemainIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for remain block operation
Args:
            operate: operate str. Expected in [init, add, delete].
body_str: gitee issue body str.
issues: issue id list.
Returns:
str: new issue body str
"""
t_header = ["issue", "仓库", "status", "type"]
block_name = "# 4、遗留问题"
return self.operate_for_specific_block(
t_header,
block_name,
operate=operate,
body_str=body_str,
issues=issues
)
class IssueOperation(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
args = (repo, token, issue_num)
self.cve_object = CveIssue(*args)
self.bugfix_object = BugFixIssue(*args)
self.requires_object = RequiresIssue(*args)
self.install_build_object = InstallBuildIssue(*args)
self.remain_object = RemainIssue(*args)
def init_repo_table(self):
"""
init repo table
return:
md table str
"""
block_name = "# 2、测试repo源"
table_head = ["repo_type", "url"]
table_str = self.init_md_table(table_head)
return block_name + table_str
def create_install_build_issue(self, failed_type, pkg_name, log_data):
"""
create issue when install failed or build failed
Args:
failed_type: install failed or build failed
pkg_name: package name
log_data: Compilation log information
return:
issue_id
"""
branch = self.get_update_issue_branch()
if not branch:
logger.error("failed to create install build issue because the release issue branch not found.")
return None
release_time = self.get_release_time()
if not release_time:
logger.error("failed to create install build issue because the release time not found.")
return None
params = {
"repo": pkg_name,
"owner": self.owner,
"access_token": self.token,
"title": "[{brh}] {pkg} {verify_type} failed {release_date}".format(pkg=pkg_name, verify_type=failed_type,
brh=branch, release_date=release_time)
}
command = ""
if failed_type == "build":
command = "rpmbuild --rebuild"
elif failed_type == "install":
command = "yum install"
params["body"] = """Branch: {brh}
Component: {pkg}
Instructions to reappear the problem : {command}
Expected results: successfully {_type}
Actual results: failed to {_type}
<b>Partial failure log:</b>
<P>
{log_data}
""".format(brh=branch, pkg=pkg_name, command=command,
_type=failed_type, log_data=log_data)
issue_id = self.create_issue(params)
return issue_id
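    # Illustrative only: with hypothetical values branch="openEuler-20.03-LTS-SP1",
    # pkg_name="gcc", failed_type="build" and release time "20210425", the generated
    # issue title would be:
    #   "[openEuler-20.03-LTS-SP1] gcc build failed 20210425"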
def get_update_version_info(self):
"""
Get update target and personnel information
Returns:
update version info
"""
issue_body = self.get_issue_body(self.issue_num)
if issue_body:
if re.compile("1、CVE.*?\\n\\n", re.S).search(issue_body):
logger.error("Issue has CVE content, maybe you already have operated start update command.")
return None
if "代码冻结" not in issue_body:
logger.error("the code freeze time is not in release issue body.")
return None
if not issue_body.endswith("\n"):
issue_body += "\n"
return issue_body
return None
def get_release_time(self):
"""
get the date for release
Returns:
release_date
"""
issue_body = self.get_issue_body(self.issue_num)
if not issue_body:
logger.error("no content of release issue body.")
return None
date_info = re.compile("(?P<release_date>代码冻结.*?\\n\\n)", re.S).search(issue_body)
if not date_info:
logger.error("the code freeze time is not in release issue body.")
return None
split_date_info = re.split(r":|:", date_info["release_date"].strip())
try:
release_date = split_date_info[1].strip()
# The length of the date including year, month, and day is 8
if release_date.isdigit() and len(release_date) == 8:
return release_date
logger.error("The format of the code freeze date: %s does not meet the requirements." % release_date)
return None
except IndexError:
logger.error("error in getting code freeze date.")
return None
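    # Illustrative only: if the release issue body contains a hypothetical line such as
    # "代码冻结:20210425" followed by a blank line, the regex above captures it, the split
    # on ":"/":" yields "20210425", the 8-digit check passes, and "20210425" is returned.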
def get_repo(self, md_type=True):
"""
        get repo according to branch, date and epol
"""
branch = self.get_update_issue_branch()
if not branch:
raise ValueError("can not get the branch, please check.")
release_date = self.get_release_time()
if not release_date:
raise ValueError("can not get the release time, please check.")
base_url = REPO_BASE_URL + branch
repos = []
repo_dict = {
"repo_type": "standard",
"url": base_url + "/update_" + release_date + "/"
}
repos.append(repo_dict)
pkglist = self.get_update_list()
_, epol_list = self.get_standard_epol_list(branch, pkglist)
if epol_list:
repo_dict = dict()
repo_dict["repo_type"] = "epol"
if "sp2" in branch or "SP2" in branch:
repo_dict["url"] = base_url + "/EPOL/update_" + release_date + "/main/"
else:
repo_dict["url"] = base_url + "/EPOL/update_" + release_date + "/"
repos.append(repo_dict)
if md_type:
t_header = ["repo_type", "url"]
block_name = "# 2、测试repo源"
return self.init_md_table(t_head=t_header, body_info=repos, block_title=block_name)
return repos
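    # Illustrative only: for a hypothetical branch "openEuler-20.03-LTS-SP2" and release
    # date "20210425", and assuming the update list contains EPOL packages, the
    # non-markdown return value (md_type=False) would look like:
    #   [{"repo_type": "standard", "url": REPO_BASE_URL + "openEuler-20.03-LTS-SP2/update_20210425/"},
    #    {"repo_type": "epol", "url": REPO_BASE_URL + "openEuler-20.03-LTS-SP2/EPOL/update_20210425/main/"}]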
@staticmethod
def _process_issue_id(body):
"""
Process the MD string to get the issue ID
Args:
body (str): block body
Returns:
            list: issue ids found in the block body
"""
content = re.compile("#[a-zA-Z0-9]+", re.S).findall(body)
if not content:
return content
return [con.replace("#", "") for con in content]
def _get_install_build_bugfix_issue_id(self, issue_body):
"""
        Get the issue ids of each block using regular expressions
        Args:
            issue_body: issue body str
        Returns:
            issue numbers: issue number lists for the install_build, bugfix and cve blocks
"""
def update_set(res_obj):
# Call the _process_issue_id function to return the issue number
res_set = set()
issue_list = self._process_issue_id(res_obj)
res_set.update(issue_list)
return res_set
def update_res(issue_res, choice):
# If this table object exists,
# the final issue is fetched based on the selection
issues = set()
if issue_res:
issues = update_set(issue_res[choice])
return issues
# Installs the compiled table information object
install_build_res = re.compile("(?P<install_build>3、安装、自编译问题.*?\\n\\n)",
re.S).search(issue_body)
# Table information object for bugfix
bugfix_res = re.compile("(?P<bugfix>2、bugfix.*?\\n\\n)", re.S).search(issue_body)
# cve table information object
cve_res = re.compile("(?P<cve>1、CVE.*?\\n\\n)", re.S).search(issue_body)
install_build_issues = update_res(install_build_res, "install_build")
bugfix_issues = update_res(bugfix_res, "bugfix")
cve_issues = update_res(cve_res, "cve")
if not all([install_build_issues, bugfix_issues, cve_issues]):
logger.info("Block has no related issues install_build_issues:%s, "
"bugfix_issues: %s,cve_issues: %s " % (install_build_issues, bugfix_issues, cve_issues))
return list(install_build_issues), list(bugfix_issues), list(cve_issues)
def update_remain_issue_state(self, issue_list, action):
"""
        Update the status of issues in the bugfix and cve blocks according to
        whether they are added to or removed from the remain block
Args:
issue_list: issues
action: add or delete
Returns:
True or False
"""
try:
if action not in ["add", "delete"]:
raise ValueError("action parameter errors must be in add and delete")
issue_body = self.get_issue_body(self.issue_num)
if not issue_body:
raise ValueError("failed to obtain the issue description")
_, bugfix_issues, cve_issue = self._get_install_build_bugfix_issue_id(issue_body)
to_update = {}
not_exist_issues = []
for issue in issue_list:
if issue not in bugfix_issues and issue not in cve_issue:
not_exist_issues.append(issue)
logger.warning("issue %s not exist in cve and bugfix part" % issue)
continue
if issue in bugfix_issues:
t_head = ["issue", "仓库", "status"]
operate_ins = getattr(self, "bugfix" + "_object")
block_name = '## 2、bugfix'
new_con = operate_ins.get_single_issue_info(issue, block_name)[0]
else:
t_head = ["CVE", "仓库", "status", "score", "version", "abi是否变化"]
operate_ins = getattr(self, "cve" + "_object")
block_name = '## 1、CVE'
new_con = operate_ins.get_single_issue_info(issue, block_name)[0]
if action == "add":
new_con["status"] = "遗留"
to_update.setdefault(
issue, self.convert_md_table_format(t_head, [new_con])
)
body_str = self.get_new_body_lines(
issue_body, update_info=to_update, start_flag=block_name, end_flag="\n"
)
res = self.update_issue(body=body_str)
if not res:
raise ValueError("failed to %s action issue status,issue is %s" % (action, issue))
except (ValueError, AttributeError, IndexError, TypeError, KeyError) as error:
logger.error("In the %s operation, the reasons for the error are as follows: %s" % (action, error))
return False
if issue_list == not_exist_issues:
return False
return True
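    # Illustrative only: marking a hypothetical issue as left over rewrites its row in
    # the cve or bugfix table and sets its status to "遗留":
    #   self.update_remain_issue_state(["I3XXXX"], action="add")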
def get_remain_issues(self):
"""
get issues in remain block
Returns:
remain issues
"""
issue_body = self.get_issue_body(self.issue_num)
if not issue_body:
logger.error("empty body of release issue.")
return []
remain_res = re.compile("(?P<remain>4、遗留问题.*?\\n\\n)", re.S).search(issue_body)
if not remain_res:
logger.error("can not find remain issues label in release issue.")
return []
remain_issues = self._process_issue_id(remain_res["remain"])
if not remain_issues:
logger.info("can not find any remain issues in release issue.")
return list(set(remain_issues))
def get_remain_packages(self):
"""
get packages in remain block
Returns:
remain package list
"""
remain_issues = self.get_remain_issues()
remain_pkgs = []
for issue_number in remain_issues:
issue_content = self.get_issue_info(issue_number=issue_number)
if not issue_content:
logger.error("can not get the content of issue %s, perhaps this issue not exist." % issue_number)
continue
repository = issue_content.get("repository", {})
if repository.get("name"):
remain_pkgs.append(repository.get("name"))
return list(set(remain_pkgs))
def check_issue_state(self):
"""
Check the issue status under the bugfix and install_build headers
Returns:
True: update the status of the issue to the latest status successfully
False: failed to update the status of the issue to the latest status
"""
try:
body = self.get_issue_body(self.issue_num)
if not body:
raise ValueError("failed to get issue description information")
# get the bugfix and the issue number under the install_build and cve table headers
install_build_issues, bugfix_issues, _ = self._get_install_build_bugfix_issue_id(body)
remain_issues = self.get_remain_issues()
if install_build_issues:
install_build_issues = [issue for issue in install_build_issues if issue not in remain_issues]
self.operate_release_issue(operation="update", operate_block="install_build",
issues=install_build_issues)
if bugfix_issues:
bugfix_issues = [issue for issue in bugfix_issues if issue not in remain_issues]
self.operate_release_issue(operation="update", operate_block="bugfix",
issues=bugfix_issues)
except (ValueError, TypeError, KeyError, AttributeError) as error:
logger.error("failed to update the status of the issue, the specific reason is %s" % error)
return False
return True
def init_issue_description(self, *args):
"""
        initialize the release issue body when the "start-update" command is commented
Returns:
True or False
"""
update_info = self.get_update_version_info()
if not update_info:
return False
release_range = "# 1、发布范围\n"
cve_block_str = self.cve_object.init(*args)
bugfix_block_str = self.bugfix_object.init()
requires_block_str = self.requires_object.init()
repo_block_str = self.init_repo_table()
install_build_block_str = self.install_build_object.init()
remain_block_str = self.remain_object.init()
body_str = (
update_info
+ release_range
+ cve_block_str
+ bugfix_block_str
+ requires_block_str
+ repo_block_str
+ install_build_block_str
+ remain_block_str
)
return True if self.update_issue(body=body_str) else False
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for specific operation
Args:
operate: operate str. Defaults to "init".expected [init,add,delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
new issue body str
"""
old_body_str = self.get_issue_body(self.issue_num)
if not old_body_str:
logger.error("The current issue has no content, please start first.")
return False
update_block = args[0]
# get the block object, like cve block object, and then call
# "get_new_issue_body" for this block
operate_object = getattr(self, update_block + "_object")
body_str = operate_object.get_new_issue_body(
operate=operate, body_str=old_body_str, issues=issues)
return body_str
def update_issue_description(self, operate, update_block, issues=None):
"""
to update issue description
Args:
operate: operate in {add,delete}.
update_block: block name, like cve or bugfix,
issues: issue list.
returns:
True or False
"""
if not issues:
issues = []
old_body_str = self.get_issue_body(self.issue_num)
if not old_body_str:
logger.error(
"The current issue has no content, please start first.")
return False
body_str = self.get_new_issue_body(update_block, operate=operate, issues=issues)
if not body_str:
logger.error(
"after update issue description, got empty new release issue body.")
return False
return True if self.update_issue(body=body_str) else False
def count_issue_status(self):
"""
        collect statistics on the status of all issues
        Returns:
            true: all issues are completed
            false: there is at least one unfinished issue
"""
try:
body = self.get_issue_body(self.issue_num)
# obtain the issue number under installation, compilation and bugfix
install_build_issues, bugfix_issues, _ = self._get_install_build_bugfix_issue_id(body)
issues = install_build_issues + bugfix_issues
unfinished_issues = []
if not issues:
logger.info("no issue in install_build and bugfix block.")
return True
# traverse all issues, get the status of the issue,
# and add the unfinished ones to the unfinished list
for issue_number in issues:
issue_content = self.get_issue_info(issue_number)
if not issue_content:
logger.error("failed to get the issue info of %s. " % issue_number)
continue
if issue_content.get("issue_state") != "已完成":
unfinished_issues.append(issue_number)
if unfinished_issues:
logger.info("The following issue status is not complete %s" % ",".join(unfinished_issues))
return False
except (ValueError, TypeError) as error:
logger.error("an error occurred while counting the status of the issue. "
"The error is %s" % error)
return False
return True
@staticmethod
def release_announcement(user_name, password):
"""
release announcement
Args:
user_name: user name
password: password
Returns:
return true on success, false on failure
"""
try:
response = requests.post(RELEASE_URL, data={"username": user_name,
"password": password})
if response.status_code == 200:
if "successfully" in json.loads(response.text):
logger.info("release announcement successfully")
return True
logger.error(response.text)
return False
logger.error("failed to request the announcement address: %s ,"
"because of the response status code is %s "
"response body is %s " % (RELEASE_URL, response.status_code, response.text))
return False
except (requests.RequestException, AttributeError, json.JSONDecodeError) as error:
logger.error("failed to request the announcement address: %s ,"
"because of %s" % (RELEASE_URL, error))
return False
def operate_release_issue(self, *args, operation="init", operate_block=None, issues=None):
"""
        entry point for modifying the release issue
Args:
operation: {init,add,delete}
operate_block: block to operate
when the operation is "init", operate_block=None
issues: issue list
Returns:
True or False
"""
try:
if operation == "init":
return self.init_issue_description(*args)
else:
return self.update_issue_description(
operate=operation, update_block=operate_block, issues=issues
)
except ValueError as e:
logger.error(e)
return False
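# Illustrative only (not part of the original module): a typical driver flow, assuming
# hypothetical repo/token/issue-number values and valid gitee/OBS credentials.
#
#   issue_op = IssueOperation("<repo>", "<gitee-token>", "<release-issue-number>")
#   issue_op.operate_release_issue(user_email, obs_ak, obs_sk, operation="init")
#   issue_op.operate_release_issue(operation="add", operate_block="bugfix", issues=["I3XXXX"])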
|
[
"json.loads",
"javcra.libs.log.logger.error",
"javcra.libs.log.logger.warning",
"datetime.date.today",
"datetime.timedelta",
"requests.get",
"javcra.libs.log.logger.info",
"requests.post",
"retrying.retry",
"datetime.datetime.now",
"re.compile"
] |
[((12740, 12806), 're.compile', 're.compile', (['"""openEuler评分.*?(?P<euler_score>[0-9\\\\.]+)"""'], {'flags': 're.S'}), "('openEuler评分.*?(?P<euler_score>[0-9\\\\.]+)', flags=re.S)\n", (12750, 12806), False, 'import re\n'), ((13509, 13566), 're.compile', 're.compile', (['"""修复是否涉及abi变化.*?(?P<abi>.*)[\\\\n$]"""'], {'flags': 're.S'}), "('修复是否涉及abi变化.*?(?P<abi>.*)[\\\\n$]', flags=re.S)\n", (13519, 13566), False, 'import re\n'), ((20349, 20399), 'retrying.retry', 'retry', ([], {'stop_max_attempt_number': '(5)', 'wait_fixed': '(60000)'}), '(stop_max_attempt_number=5, wait_fixed=60000)\n', (20354, 20399), False, 'from retrying import retry\n'), ((21838, 21925), 'javcra.libs.log.logger.info', 'logger.info', (['"""Start to obtain cve archive information, it may take a few minutes."""'], {}), "(\n 'Start to obtain cve archive information, it may take a few minutes.')\n", (21849, 21925), False, 'from javcra.libs.log import logger\n'), ((9839, 9909), 'javcra.libs.log.logger.error', 'logger.error', (['("Failed to create Jenkins\' comment message %s" % comment)'], {}), '("Failed to create Jenkins\' comment message %s" % comment)\n', (9851, 9909), False, 'from javcra.libs.log import logger\n'), ((13018, 13070), 're.compile', 're.compile', (['"""BaseScore[::](?P<base_score>[0-9\\\\.]+)"""'], {}), "('BaseScore[::](?P<base_score>[0-9\\\\.]+)')\n", (13028, 13070), False, 'import re\n'), ((13659, 13713), 'javcra.libs.log.logger.error', 'logger.error', (['"""The abi pattern did not match the info"""'], {}), "('The abi pattern did not match the info')\n", (13671, 13713), False, 'from javcra.libs.log import logger\n'), ((19228, 19267), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers'}), '(url, headers=self.headers)\n', (19240, 19267), False, 'import requests\n'), ((19508, 19667), 'javcra.libs.log.logger.error', 'logger.error', (["('The CVE List file fails to be archived,The response status code is %s,the response body is %s'\n % (response.status_code, response.text))"], {}), "(\n 'The CVE List file fails to be archived,The response status code is %s,the response body is %s'\n % (response.status_code, response.text))\n", (19520, 19667), False, 'from javcra.libs.log import logger\n'), ((27186, 27292), 'javcra.libs.log.logger.error', 'logger.error', (['"""failed to create install build issue because the release issue branch not found."""'], {}), "(\n 'failed to create install build issue because the release issue branch not found.'\n )\n", (27198, 27292), False, 'from javcra.libs.log import logger\n'), ((27396, 27489), 'javcra.libs.log.logger.error', 'logger.error', (['"""failed to create install build issue because the release time not found."""'], {}), "(\n 'failed to create install build issue because the release time not found.')\n", (27408, 27489), False, 'from javcra.libs.log import logger\n'), ((29589, 29638), 'javcra.libs.log.logger.error', 'logger.error', (['"""no content of release issue body."""'], {}), "('no content of release issue body.')\n", (29601, 29638), False, 'from javcra.libs.log import logger\n'), ((29793, 29859), 'javcra.libs.log.logger.error', 'logger.error', (['"""the code freeze time is not in release issue body."""'], {}), "('the code freeze time is not in release issue body.')\n", (29805, 29859), False, 'from javcra.libs.log import logger\n'), ((30218, 30328), 'javcra.libs.log.logger.error', 'logger.error', (["('The format of the code freeze date: %s does not meet the requirements.' 
%\n release_date)"], {}), "(\n 'The format of the code freeze date: %s does not meet the requirements.' %\n release_date)\n", (30230, 30328), False, 'from javcra.libs.log import logger\n'), ((33734, 33898), 'javcra.libs.log.logger.info', 'logger.info', (["('Block has no related issues install_build_issues:%s, bugfix_issues: %s,cve_issues: %s '\n % (install_build_issues, bugfix_issues, cve_issues))"], {}), "(\n 'Block has no related issues install_build_issues:%s, bugfix_issues: %s,cve_issues: %s '\n % (install_build_issues, bugfix_issues, cve_issues))\n", (33745, 33898), False, 'from javcra.libs.log import logger\n'), ((36751, 36795), 'javcra.libs.log.logger.error', 'logger.error', (['"""empty body of release issue."""'], {}), "('empty body of release issue.')\n", (36763, 36795), False, 'from javcra.libs.log import logger\n'), ((36946, 37012), 'javcra.libs.log.logger.error', 'logger.error', (['"""can not find remain issues label in release issue."""'], {}), "('can not find remain issues label in release issue.')\n", (36958, 37012), False, 'from javcra.libs.log import logger\n'), ((37147, 37210), 'javcra.libs.log.logger.info', 'logger.info', (['"""can not find any remain issues in release issue."""'], {}), "('can not find any remain issues in release issue.')\n", (37158, 37210), False, 'from javcra.libs.log import logger\n'), ((41039, 41108), 'javcra.libs.log.logger.error', 'logger.error', (['"""The current issue has no content, please start first."""'], {}), "('The current issue has no content, please start first.')\n", (41051, 41108), False, 'from javcra.libs.log import logger\n'), ((41975, 42044), 'javcra.libs.log.logger.error', 'logger.error', (['"""The current issue has no content, please start first."""'], {}), "('The current issue has no content, please start first.')\n", (41987, 42044), False, 'from javcra.libs.log import logger\n'), ((42215, 42301), 'javcra.libs.log.logger.error', 'logger.error', (['"""after update issue description, got empty new release issue body."""'], {}), "(\n 'after update issue description, got empty new release issue body.')\n", (42227, 42301), False, 'from javcra.libs.log import logger\n'), ((44326, 44404), 'requests.post', 'requests.post', (['RELEASE_URL'], {'data': "{'username': user_name, 'password': password}"}), "(RELEASE_URL, data={'username': user_name, 'password': password})\n", (44339, 44404), False, 'import requests\n'), ((44755, 44944), 'javcra.libs.log.logger.error', 'logger.error', (["('failed to request the announcement address: %s ,because of the response status code is %s response body is %s '\n % (RELEASE_URL, response.status_code, response.text))"], {}), "(\n 'failed to request the announcement address: %s ,because of the response status code is %s response body is %s '\n % (RELEASE_URL, response.status_code, response.text))\n", (44767, 44944), False, 'from javcra.libs.log import logger\n'), ((9572, 9619), 'javcra.libs.log.logger.error', 'logger.error', (['"""failed to obtain jenkins_result"""'], {}), "('failed to obtain jenkins_result')\n", (9584, 9619), False, 'from javcra.libs.log import logger\n'), ((15405, 15445), 're.compile', 're.compile', (['"""漏洞归属的版本[::](?P<version>.*)"""'], {}), "('漏洞归属的版本[::](?P<version>.*)')\n", (15415, 15445), False, 'import re\n'), ((19374, 19477), 'javcra.libs.log.logger.info', 'logger.info', (['"""The CVE-Manager is triggered to generate the CVE list and archive the CVE list"""'], {}), "(\n 'The CVE-Manager is triggered to generate the CVE list and archive the CVE list'\n )\n", (19385, 19477), False, 
'from javcra.libs.log import logger\n'), ((19820, 19894), 'javcra.libs.log.logger.error', 'logger.error', (["('The CVE List file fails to be archived because %s ' % error)"], {}), "('The CVE List file fails to be archived because %s ' % error)\n", (19832, 19894), False, 'from javcra.libs.log import logger\n'), ((20861, 20900), 'javcra.libs.log.logger.error', 'logger.error', (['"""Failed to obtain branch"""'], {}), "('Failed to obtain branch')\n", (20873, 20900), False, 'from javcra.libs.log import logger\n'), ((21107, 21148), 'javcra.libs.log.logger.error', 'logger.error', (['"""Failed to obtain CVE data"""'], {}), "('Failed to obtain CVE data')\n", (21119, 21148), False, 'from javcra.libs.log import logger\n'), ((28955, 29057), 'javcra.libs.log.logger.error', 'logger.error', (['"""Issue has CVE content, maybe you already have operated start update command."""'], {}), "(\n 'Issue has CVE content, maybe you already have operated start update command.'\n )\n", (28967, 29057), False, 'from javcra.libs.log import logger\n'), ((29134, 29200), 'javcra.libs.log.logger.error', 'logger.error', (['"""the code freeze time is not in release issue body."""'], {}), "('the code freeze time is not in release issue body.')\n", (29146, 29200), False, 'from javcra.libs.log import logger\n'), ((29684, 29735), 're.compile', 're.compile', (['"""(?P<release_date>代码冻结.*?\\\\n\\\\n)"""', 're.S'], {}), "('(?P<release_date>代码冻结.*?\\\\n\\\\n)', re.S)\n", (29694, 29735), False, 'import re\n'), ((30384, 30434), 'javcra.libs.log.logger.error', 'logger.error', (['"""error in getting code freeze date."""'], {}), "('error in getting code freeze date.')\n", (30396, 30434), False, 'from javcra.libs.log import logger\n'), ((32044, 32077), 're.compile', 're.compile', (['"""#[a-zA-Z0-9]+"""', 're.S'], {}), "('#[a-zA-Z0-9]+', re.S)\n", (32054, 32077), False, 'import re\n'), ((33093, 33151), 're.compile', 're.compile', (['"""(?P<install_build>3、安装、自编译问题.*?\\\\n\\\\n)"""', 're.S'], {}), "('(?P<install_build>3、安装、自编译问题.*?\\\\n\\\\n)', re.S)\n", (33103, 33151), False, 'import re\n'), ((33277, 33326), 're.compile', 're.compile', (['"""(?P<bugfix>2、bugfix.*?\\\\n\\\\n)"""', 're.S'], {}), "('(?P<bugfix>2、bugfix.*?\\\\n\\\\n)', re.S)\n", (33287, 33326), False, 'import re\n'), ((33404, 33447), 're.compile', 're.compile', (['"""(?P<cve>1、CVE.*?\\\\n\\\\n)"""', 're.S'], {}), "('(?P<cve>1、CVE.*?\\\\n\\\\n)', re.S)\n", (33414, 33447), False, 'import re\n'), ((36306, 36415), 'javcra.libs.log.logger.error', 'logger.error', (["('In the %s operation, the reasons for the error are as follows: %s' % (\n action, error))"], {}), "(\n 'In the %s operation, the reasons for the error are as follows: %s' % (\n action, error))\n", (36318, 36415), False, 'from javcra.libs.log import logger\n'), ((36840, 36887), 're.compile', 're.compile', (['"""(?P<remain>4、遗留问题.*?\\\\n\\\\n)"""', 're.S'], {}), "('(?P<remain>4、遗留问题.*?\\\\n\\\\n)', re.S)\n", (36850, 36887), False, 'import re\n'), ((37640, 37746), 'javcra.libs.log.logger.error', 'logger.error', (["('can not get the content of issue %s, perhaps this issue not exist.' %\n issue_number)"], {}), "(\n 'can not get the content of issue %s, perhaps this issue not exist.' 
%\n issue_number)\n", (37652, 37746), False, 'from javcra.libs.log import logger\n'), ((39376, 39476), 'javcra.libs.log.logger.error', 'logger.error', (["('failed to update the status of the issue, the specific reason is %s' % error)"], {}), "(\n 'failed to update the status of the issue, the specific reason is %s' %\n error)\n", (39388, 39476), False, 'from javcra.libs.log import logger\n'), ((43017, 43075), 'javcra.libs.log.logger.info', 'logger.info', (['"""no issue in install_build and bugfix block."""'], {}), "('no issue in install_build and bugfix block.')\n", (43028, 43075), False, 'from javcra.libs.log import logger\n'), ((43847, 43954), 'javcra.libs.log.logger.error', 'logger.error', (["('an error occurred while counting the status of the issue. The error is %s' %\n error)"], {}), "(\n 'an error occurred while counting the status of the issue. The error is %s'\n % error)\n", (43859, 43954), False, 'from javcra.libs.log import logger\n'), ((44686, 44713), 'javcra.libs.log.logger.error', 'logger.error', (['response.text'], {}), '(response.text)\n', (44698, 44713), False, 'from javcra.libs.log import logger\n'), ((45119, 45229), 'javcra.libs.log.logger.error', 'logger.error', (["('failed to request the announcement address: %s ,because of %s' % (\n RELEASE_URL, error))"], {}), "(\n 'failed to request the announcement address: %s ,because of %s' % (\n RELEASE_URL, error))\n", (45131, 45229), False, 'from javcra.libs.log import logger\n'), ((46011, 46026), 'javcra.libs.log.logger.error', 'logger.error', (['e'], {}), '(e)\n', (46023, 46026), False, 'from javcra.libs.log import logger\n'), ((18914, 18937), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18935, 18937), False, 'import datetime\n'), ((18940, 18968), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-90)'}), '(days=-90)\n', (18958, 18968), False, 'import datetime\n'), ((28884, 28918), 're.compile', 're.compile', (['"""1、CVE.*?\\\\n\\\\n"""', 're.S'], {}), "('1、CVE.*?\\\\n\\\\n', re.S)\n", (28894, 28918), False, 'import re\n'), ((34946, 35013), 'javcra.libs.log.logger.warning', 'logger.warning', (["('issue %s not exist in cve and bugfix part' % issue)"], {}), "('issue %s not exist in cve and bugfix part' % issue)\n", (34960, 35013), False, 'from javcra.libs.log import logger\n'), ((43398, 43465), 'javcra.libs.log.logger.error', 'logger.error', (["('failed to get the issue info of %s. ' % issue_number)"], {}), "('failed to get the issue info of %s. ' % issue_number)\n", (43410, 43465), False, 'from javcra.libs.log import logger\n'), ((44542, 44567), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (44552, 44567), False, 'import json\n'), ((44589, 44637), 'javcra.libs.log.logger.info', 'logger.info', (['"""release announcement successfully"""'], {}), "('release announcement successfully')\n", (44600, 44637), False, 'from javcra.libs.log import logger\n'), ((20605, 20626), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (20624, 20626), False, 'import datetime\n'), ((20649, 20670), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (20668, 20670), False, 'import datetime\n'), ((20694, 20715), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (20713, 20715), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
#
# Tests for ``sim.py``
# These tests were hand calculated by <NAME>: <EMAIL>
#
from clusim.clustering import Clustering
import clusim.sim as sim
from clusim.dag import DAG
import clusim.clusimelement as clusimelement
from numpy.testing import assert_approx_equal
from numpy import mean
def test_comparison_example():
c1_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0], 4: [2], 5: [1]}
c2_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0], 4: [2], 5: [2]}
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict)
N11, N10, N01, N00 = sim.count_pairwise_cooccurence(c1, c2)
    assert N11 == 2, "Element co-occurrence counts for N11 do not match. %s != %s" % (N11, 2)
    assert N10 == 2, "Element co-occurrence counts for N10 do not match. %s != %s" % (N10, 2)
    assert N01 == 1, "Element co-occurrence counts for N01 do not match. %s != %s" % (N01, 1)
    assert N00 == 10, "Element co-occurrence counts for N00 do not match. %s != %s" % (N00, 10)
known_sim_values = {'jaccard_index': 0.4,
'rand_index': 0.8,
'fowlkes_mallows_index': 0.5773502691896258,
'rogers_tanimoto_index': 2./3.,
'southwood_index': 2./3.,
'czekanowski_index': 0.5714285714285714,
'dice_index': 0.5714285714285714,
'sorensen_index': 0.5714285714285714,
'pearson_correlation': 0.011363636363636364,
'classification_error': 0.16666666666666674,
'purity_index': 0.8333333333333333,
'fmeasure': 0.5714285714285714,
'nmi': 0.7396673768007593,
'vi': 0.792481250360578,
'geometric_accuracy': 0.8333333333333334,
'overlap_quality': 0.0,
'onmi': 0.7449589906475155,
'omega_index': 0.44444444444444453
}
for simfunc in sim.available_similarity_measures:
        simvalue = getattr(sim, simfunc)(c1, c2)
assert simvalue == known_sim_values[simfunc], "Similarity Measure %s does not match. %s != %s" % (simfunc, simvalue, known_sim_values[simfunc])
def test_model_example():
c1_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0]}
c2_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [1]}
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict)
known_rand_values = {'perm': 0.5,
'perm1': 0.5,
'num': 0.510204081632653,
'num1': 0.5,
'all': 0.555555555555556,
'all1': 0.5
}
known_mi_values = {'perm': 0.311278124459133,
'perm1': 0.311278124459133,
'num': 0.309927805548467,
'num1': 0.301825892084476,
'all': 0.611635721962606,
'all1': 0.419448541053684
}
for rdm in sim.available_random_models:
exp_rand_value = sim.expected_rand_index(n_elements=c1.n_elements,
n_clusters1=c1.n_clusters,
n_clusters2=c2.n_clusters,
clu_size_seq1=c1.clu_size_seq,
clu_size_seq2=c2.clu_size_seq,
random_model=rdm
)
assert_approx_equal(exp_rand_value, known_rand_values[rdm], 10**(-10), "Expected Rand Index with %s Random Model does not match. %s != %s" % (rdm, exp_rand_value, known_rand_values[rdm]))
exp_mi_value = sim.expected_mi(n_elements=c1.n_elements,
n_clusters1=c1.n_clusters,
n_clusters2=c2.n_clusters,
clu_size_seq1=c1.clu_size_seq,
clu_size_seq2=c2.clu_size_seq,
random_model=rdm,
logbase=2.)
assert_approx_equal(exp_mi_value, known_mi_values[rdm], 10**(-10), "Expected MI with %s Random Model does not match. %s != %s" % (rdm, exp_mi_value, known_mi_values[rdm]) )
def test_elementsim_example():
# taken from Fig 3 of Gates et al (2018) Scientific Reports
# overlapping clustering
c1_elm2clu_dict = {0: [0], 1: [0], 2: [0], 3: [1], 4: [1], 5: [1, 2], 6: [2]}
# hierarchical clustering
c2_elm2clu_dict = {0: [1], 1: [1], 2: [2], 3: [5], 4: [5], 5: [6, 8], 6: [9]}
c2_dag = DAG()
c2_dag.add_edges_from([(0, 1), (0, 2), (3, 4), (4, 5), (4, 6), (3, 7), (7, 8), (7, 9)])
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict, hier_graph=c2_dag)
known_elsim = [0.92875658, 0.92875658, 0.85751315, 0.25717544, 0.74282456, 0.82083876, 0.80767074]
elsim, ellabels = clusimelement.element_sim_elscore(c1, c2, alpha=0.9, r=1., r2=None, rescale_path_type='max')
for i in range(7):
assert_approx_equal(elsim[i], known_elsim[i], 10**(-10), "Element-centric similarity for element %s does not match. %s != %s" % (i, elsim[i], known_elsim[i]) )
if __name__ == "__main__":
test_comparison_example()
test_model_example()
test_elementsim_example()
|
[
"clusim.sim.expected_mi",
"clusim.sim.expected_rand_index",
"clusim.clusimelement.element_sim_elscore",
"clusim.dag.DAG",
"clusim.clustering.Clustering",
"numpy.testing.assert_approx_equal",
"clusim.sim.count_pairwise_cooccurence"
] |
[((499, 539), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c1_elm2clu_dict'}), '(elm2clu_dict=c1_elm2clu_dict)\n', (509, 539), False, 'from clusim.clustering import Clustering\n'), ((549, 589), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c2_elm2clu_dict'}), '(elm2clu_dict=c2_elm2clu_dict)\n', (559, 589), False, 'from clusim.clustering import Clustering\n'), ((616, 654), 'clusim.sim.count_pairwise_cooccurence', 'sim.count_pairwise_cooccurence', (['c1', 'c2'], {}), '(c1, c2)\n', (646, 654), True, 'import clusim.sim as sim\n'), ((2501, 2541), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c1_elm2clu_dict'}), '(elm2clu_dict=c1_elm2clu_dict)\n', (2511, 2541), False, 'from clusim.clustering import Clustering\n'), ((2551, 2591), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c2_elm2clu_dict'}), '(elm2clu_dict=c2_elm2clu_dict)\n', (2561, 2591), False, 'from clusim.clustering import Clustering\n'), ((4906, 4911), 'clusim.dag.DAG', 'DAG', ([], {}), '()\n', (4909, 4911), False, 'from clusim.dag import DAG\n'), ((5014, 5054), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c1_elm2clu_dict'}), '(elm2clu_dict=c1_elm2clu_dict)\n', (5024, 5054), False, 'from clusim.clustering import Clustering\n'), ((5064, 5123), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c2_elm2clu_dict', 'hier_graph': 'c2_dag'}), '(elm2clu_dict=c2_elm2clu_dict, hier_graph=c2_dag)\n', (5074, 5123), False, 'from clusim.clustering import Clustering\n'), ((5251, 5348), 'clusim.clusimelement.element_sim_elscore', 'clusimelement.element_sim_elscore', (['c1', 'c2'], {'alpha': '(0.9)', 'r': '(1.0)', 'r2': 'None', 'rescale_path_type': '"""max"""'}), "(c1, c2, alpha=0.9, r=1.0, r2=None,\n rescale_path_type='max')\n", (5284, 5348), True, 'import clusim.clusimelement as clusimelement\n'), ((3268, 3460), 'clusim.sim.expected_rand_index', 'sim.expected_rand_index', ([], {'n_elements': 'c1.n_elements', 'n_clusters1': 'c1.n_clusters', 'n_clusters2': 'c2.n_clusters', 'clu_size_seq1': 'c1.clu_size_seq', 'clu_size_seq2': 'c2.clu_size_seq', 'random_model': 'rdm'}), '(n_elements=c1.n_elements, n_clusters1=c1.n_clusters,\n n_clusters2=c2.n_clusters, clu_size_seq1=c1.clu_size_seq, clu_size_seq2\n =c2.clu_size_seq, random_model=rdm)\n', (3291, 3460), True, 'import clusim.sim as sim\n'), ((3755, 3952), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['exp_rand_value', 'known_rand_values[rdm]', '(10 ** -10)', "('Expected Rand Index with %s Random Model does not match. %s != %s' % (rdm,\n exp_rand_value, known_rand_values[rdm]))"], {}), "(exp_rand_value, known_rand_values[rdm], 10 ** -10, \n 'Expected Rand Index with %s Random Model does not match. 
%s != %s' % (\n rdm, exp_rand_value, known_rand_values[rdm]))\n", (3774, 3952), False, 'from numpy.testing import assert_approx_equal\n'), ((3967, 4164), 'clusim.sim.expected_mi', 'sim.expected_mi', ([], {'n_elements': 'c1.n_elements', 'n_clusters1': 'c1.n_clusters', 'n_clusters2': 'c2.n_clusters', 'clu_size_seq1': 'c1.clu_size_seq', 'clu_size_seq2': 'c2.clu_size_seq', 'random_model': 'rdm', 'logbase': '(2.0)'}), '(n_elements=c1.n_elements, n_clusters1=c1.n_clusters,\n n_clusters2=c2.n_clusters, clu_size_seq1=c1.clu_size_seq, clu_size_seq2\n =c2.clu_size_seq, random_model=rdm, logbase=2.0)\n', (3982, 4164), True, 'import clusim.sim as sim\n'), ((4397, 4577), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['exp_mi_value', 'known_mi_values[rdm]', '(10 ** -10)', "('Expected MI with %s Random Model does not match. %s != %s' % (rdm,\n exp_mi_value, known_mi_values[rdm]))"], {}), "(exp_mi_value, known_mi_values[rdm], 10 ** -10, \n 'Expected MI with %s Random Model does not match. %s != %s' % (rdm,\n exp_mi_value, known_mi_values[rdm]))\n", (4416, 4577), False, 'from numpy.testing import assert_approx_equal\n'), ((5376, 5543), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['elsim[i]', 'known_elsim[i]', '(10 ** -10)', "('Element-centric similarity for element %s does not match. %s != %s' % (i,\n elsim[i], known_elsim[i]))"], {}), "(elsim[i], known_elsim[i], 10 ** -10, \n 'Element-centric similarity for element %s does not match. %s != %s' %\n (i, elsim[i], known_elsim[i]))\n", (5395, 5543), False, 'from numpy.testing import assert_approx_equal\n')]
|
from HystrixBox.Tools.recursiveDecompression import extract_recursive
import filecmp
import os
TEST1 = '''File not found\n'''
TEST2 = '''Not a zip file or corrupted zip file\n'''
def compareDir(dir1, dir2):
"""
Compare two directory trees content.
Return False if they differ, True is they are the same.
"""
compared = filecmp.dircmp(dir1, dir2)
if (compared.left_only or compared.right_only or compared.diff_files
or compared.funny_files):
return False
for subdir in compared.common_dirs:
if not compareDir(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True
def test_extract_recursive_true(tmpdir):
path = tmpdir.strpath
extract_recursive('../examples/recursivezip.zip', path)
assert compareDir(path, '../examples/RecursiveZipExtracted/')
def test_extract_recursive_1layer(tmpdir):
path = tmpdir.strpath
extract_recursive('../examples/root.zip', path)
assert compareDir(path, '../examples/RecursiveZipExtracted/1Introduction/2Introduction/3Introduction')
def test_extract_recursive_noFile(capfd, tmpdir):
path = tmpdir.strpath
extract_recursive('', path)
out, err = capfd.readouterr()
assert (out == TEST1)
def test_extract_recursive_noZipFile(capfd, tmpdir):
path = tmpdir.strpath
extract_recursive('../examples/extractor.txt', path)
out, err = capfd.readouterr()
assert (out == TEST2)
|
[
"HystrixBox.Tools.recursiveDecompression.extract_recursive",
"os.path.join",
"filecmp.dircmp"
] |
[((354, 380), 'filecmp.dircmp', 'filecmp.dircmp', (['dir1', 'dir2'], {}), '(dir1, dir2)\n', (368, 380), False, 'import filecmp\n'), ((750, 805), 'HystrixBox.Tools.recursiveDecompression.extract_recursive', 'extract_recursive', (['"""../examples/recursivezip.zip"""', 'path'], {}), "('../examples/recursivezip.zip', path)\n", (767, 805), False, 'from HystrixBox.Tools.recursiveDecompression import extract_recursive\n'), ((947, 994), 'HystrixBox.Tools.recursiveDecompression.extract_recursive', 'extract_recursive', (['"""../examples/root.zip"""', 'path'], {}), "('../examples/root.zip', path)\n", (964, 994), False, 'from HystrixBox.Tools.recursiveDecompression import extract_recursive\n'), ((1184, 1211), 'HystrixBox.Tools.recursiveDecompression.extract_recursive', 'extract_recursive', (['""""""', 'path'], {}), "('', path)\n", (1201, 1211), False, 'from HystrixBox.Tools.recursiveDecompression import extract_recursive\n'), ((1357, 1409), 'HystrixBox.Tools.recursiveDecompression.extract_recursive', 'extract_recursive', (['"""../examples/extractor.txt"""', 'path'], {}), "('../examples/extractor.txt', path)\n", (1374, 1409), False, 'from HystrixBox.Tools.recursiveDecompression import extract_recursive\n'), ((579, 605), 'os.path.join', 'os.path.join', (['dir1', 'subdir'], {}), '(dir1, subdir)\n', (591, 605), False, 'import os\n'), ((607, 633), 'os.path.join', 'os.path.join', (['dir2', 'subdir'], {}), '(dir2, subdir)\n', (619, 633), False, 'import os\n')]
|
"""Generate files to declare fortran functions"""
import os
import re
import logging
import importlib
from docutils.statemachine import string2lines
from sphinx.util.docutils import SphinxDirective
path_pat_mod_dir = os.path.join("{gendir}", "{mod_name}")
path_pat_mod_file = os.path.join(path_pat_mod_dir, "index.rst")
path_pat_func_file = os.path.join(path_pat_mod_dir, "{func_name}.rst")
def checkdir(path):
pdir = os.path.dirname(path)
if not os.path.exists(pdir):
os.makedirs(pdir)
class GenFortran(SphinxDirective):
has_content = True
def run(self):
if not self.content:
return []
# Loop on modules and descriptions
rst_toctree = ".. toctree::\n\t:hidden:\n\n"
rst_table = ".. list-table::\n\n"
for mod_name_desc in self.content:
smod = mod_name_desc.split(" ")
mod_name = smod[0]
mod_desc = " ".join(smod[1:])
rst_toctree += f"\tgenfortran/{mod_name}/index\n"
rst_table += f"\t* - :mod:`{mod_name}`\n"
rst_table += f"\t - {mod_desc}\n"
# Insert toctree and tables
rst_all = rst_toctree + "\n\n" + rst_table + "\n"
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
include_lines = string2lines(rst_all, convert_whitespace=1)
self.state_machine.insert_input(include_lines, source)
return []
re_directive_match = re.compile(
r"^(?P<indent>\s*)\.\.\s+genfortran::\s*\n$").match
re_indent_match = re.compile(r"^(?P<indent>\s*)\S.+\n$").match
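# Illustrative only: the rst directive shape these two regexes (and the GenFortran
# directive above) are meant to parse; the module names and descriptions are hypothetical.
#
#   .. genfortran::
#
#       mypkg._fortran_core Low-level Fortran wrappers
#       mypkg._fortran_io Fortran I/O helpers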
def generate_stub_files(srcdir, mod_name, mod_desc):
gendir = os.path.join(srcdir, "genfortran")
logging.info(f"Generating rst files for fortran wrapper "+mod_name)
mod_content = importlib.import_module(mod_name)
func_names = [func for func in dir(mod_content)
if not func.startswith('_')]
# Write files
mod_dir = path_pat_mod_dir.format(**locals())
if not os.path.exists(mod_dir):
os.makedirs(mod_dir)
mod_file = path_pat_mod_file.format(**locals())
with open(mod_file, "w") as f:
f.write(mod_name + "\n" + len(mod_name)*"=" + "\n\n")
f.write(mod_desc + "\n\n")
f.write(f".. module:: {mod_name}\n\n")
rst_table = ".. list-table::\n\n"
rst_toctree = ".. toctree::\n\t:hidden:\n\n"
for func_name in func_names:
rst_table += f"\t* - :func:`{mod_name}.{func_name}`\n"
func = getattr(mod_content, func_name)
func_sig = func.__doc__.split("\n")[0]
rst_table += f"\t - {func_sig}\n"
rst_toctree += f"\t{func_name}\n"
with open(path_pat_func_file.format(
**locals()), "w") as ff:
ff.write(func_name+"\n"+len(func_name)*"="+"\n\n")
ff.write(f".. currentmodule:: {mod_name}\n\n")
out, call = func_sig.split('=')
ff.write(f".. autofunction:: {call}\n\n")
f.write(rst_toctree+"\n\n")
f.write(rst_table)
def parse_and_generate(app):
"""Parse rst files to find directives and generate stub files"""
# Get file list
env = app.builder.env
srcdir = env.srcdir
if app.config.genfortran_src_files:
srcfiles = [os.path.join(srcdir, srcfile) for srcfile
in app.config.genfortran_src_files]
else:
env = app.builder.env
srcfiles = [env.doc2path(x, base=None) for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
# Parse files
for srcfile in srcfiles:
if not os.path.exists(srcfile):
logging.warning("[genfortran] file not found: "+srcfile)
continue
with open(srcfile) as f:
indent = None
for line in f:
m = re_directive_match(line)
if m:
indent = m.group('indent')
continue
if indent is None:
continue
m = re.match("^"+indent + r"\s+(?P<mod_name>[\w.]+)" +
r"(?P<mod_desc>\s.*)\n$", line)
if m:
generate_stub_files(
srcdir, m.group("mod_name"),
m.group("mod_desc").strip())
continue
m = re_indent_match(line)
if m and len(m.group('indent')) <= len(indent):
indent = None
def setup(app):
app.add_directive("genfortran", GenFortran)
app.connect('builder-inited', parse_and_generate)
app.add_config_value('genfortran_src_files', [], [], [list])
return {'version': '0.1'}
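# Illustrative only: how this extension might be enabled in a Sphinx conf.py; the
# extension name assumes this module is importable as "genfortran", and the file list
# is a hypothetical example (it defaults to all found docs when left empty).
#
#   extensions = ["genfortran"]
#   genfortran_src_files = ["api.rst"]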
|
[
"docutils.statemachine.string2lines",
"os.makedirs",
"importlib.import_module",
"logging.warning",
"os.path.dirname",
"os.path.exists",
"re.match",
"logging.info",
"os.path.join",
"re.compile"
] |
[((221, 259), 'os.path.join', 'os.path.join', (['"""{gendir}"""', '"""{mod_name}"""'], {}), "('{gendir}', '{mod_name}')\n", (233, 259), False, 'import os\n'), ((280, 323), 'os.path.join', 'os.path.join', (['path_pat_mod_dir', '"""index.rst"""'], {}), "(path_pat_mod_dir, 'index.rst')\n", (292, 323), False, 'import os\n'), ((345, 394), 'os.path.join', 'os.path.join', (['path_pat_mod_dir', '"""{func_name}.rst"""'], {}), "(path_pat_mod_dir, '{func_name}.rst')\n", (357, 394), False, 'import os\n'), ((428, 449), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (443, 449), False, 'import os\n'), ((1491, 1552), 're.compile', 're.compile', (['"""^(?P<indent>\\\\s*)\\\\.\\\\.\\\\s+genfortran::\\\\s*\\\\n$"""'], {}), "('^(?P<indent>\\\\s*)\\\\.\\\\.\\\\s+genfortran::\\\\s*\\\\n$')\n", (1501, 1552), False, 'import re\n'), ((1577, 1617), 're.compile', 're.compile', (['"""^(?P<indent>\\\\s*)\\\\S.+\\\\n$"""'], {}), "('^(?P<indent>\\\\s*)\\\\S.+\\\\n$')\n", (1587, 1617), False, 'import re\n'), ((1691, 1725), 'os.path.join', 'os.path.join', (['srcdir', '"""genfortran"""'], {}), "(srcdir, 'genfortran')\n", (1703, 1725), False, 'import os\n'), ((1731, 1800), 'logging.info', 'logging.info', (["(f'Generating rst files for fortran wrapper ' + mod_name)"], {}), "(f'Generating rst files for fortran wrapper ' + mod_name)\n", (1743, 1800), False, 'import logging\n'), ((1817, 1850), 'importlib.import_module', 'importlib.import_module', (['mod_name'], {}), '(mod_name)\n', (1840, 1850), False, 'import importlib\n'), ((461, 481), 'os.path.exists', 'os.path.exists', (['pdir'], {}), '(pdir)\n', (475, 481), False, 'import os\n'), ((491, 508), 'os.makedirs', 'os.makedirs', (['pdir'], {}), '(pdir)\n', (502, 508), False, 'import os\n'), ((1342, 1385), 'docutils.statemachine.string2lines', 'string2lines', (['rst_all'], {'convert_whitespace': '(1)'}), '(rst_all, convert_whitespace=1)\n', (1354, 1385), False, 'from docutils.statemachine import string2lines\n'), ((2030, 2053), 'os.path.exists', 'os.path.exists', (['mod_dir'], {}), '(mod_dir)\n', (2044, 2053), False, 'import os\n'), ((2063, 2083), 'os.makedirs', 'os.makedirs', (['mod_dir'], {}), '(mod_dir)\n', (2074, 2083), False, 'import os\n'), ((3335, 3364), 'os.path.join', 'os.path.join', (['srcdir', 'srcfile'], {}), '(srcdir, srcfile)\n', (3347, 3364), False, 'import os\n'), ((3663, 3686), 'os.path.exists', 'os.path.exists', (['srcfile'], {}), '(srcfile)\n', (3677, 3686), False, 'import os\n'), ((3700, 3758), 'logging.warning', 'logging.warning', (["('[genfortran] file not found: ' + srcfile)"], {}), "('[genfortran] file not found: ' + srcfile)\n", (3715, 3758), False, 'import logging\n'), ((4091, 4181), 're.match', 're.match', (["('^' + indent + '\\\\s+(?P<mod_name>[\\\\w.]+)' + '(?P<mod_desc>\\\\s.*)\\\\n$')", 'line'], {}), "('^' + indent + '\\\\s+(?P<mod_name>[\\\\w.]+)' +\n '(?P<mod_desc>\\\\s.*)\\\\n$', line)\n", (4099, 4181), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
"""
co2usa_load_netCDF: Load the CO2-USA Data Synthesis files from netCDF
USAGE:
The CO2-USA synthesis data is available to download from the ORNL DAAC:
https://doi.org/10.3334/ORNLDAAC/1743
To download the data, first sign into your account (or create one if you don't have one).
Next, click on "Download Data" to download the entire data set in a zip file.
Extract the netCDF files to a folder on your computer.
The CO2-USA synthesis data files should be all saved in a single directory:
/co2_usa_netCDF_files/[netCDF_files.nc]
For example, for the CO2 data file for a Boston site would be:
/co2_usa_netCDF_files/boston_co2_HF_29m_1_hour_R0_2020-09-28.nc
Set the following variables:
city: String of CO2-USA city. Example:
city = 'boston'
species: String with target species. Example:
species = 'co2'
read_folder: Path to the directory where you saved the data files. Example:
current_folder = os.getcwd()
read_folder = current_folder+'\\netCDF_formatted_files\\'
The data is in the 'co2usa' variable.
For more information, visit the CO2-USA GitHub repository:
https://github.com/loganemitchell/co2usa_data_synthesis
Written by <NAME> (<EMAIL>)
University of Utah
Last updated: 2021-06-09
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import glob
import netCDF4 as nc
#%% Executed this manually to enable interactive figures:
#%matplotlib qt
#%%
current_folder = os.getcwd()
read_folder = current_folder+'\\gcloud.utah.edu\\data\\co2-usa\\synthesis_output_ornl_new\\netCDF_formatted_files\\'
co2usa = {}
city = 'boston'
species = 'co2'
co2usa[city] = {}
all_files = glob.glob(read_folder+city+'_'+species+'*.nc')
for fni in range(len(all_files)):
#print('Loading '+all_files[fni])
nc_dat = nc.Dataset(all_files[fni])
site = all_files[fni][len(read_folder):all_files[fni].find('_1_hour')]
co2usa[city][site] = {}
co2usa[city][site]['global_attributes'] = {} # Site global attributes
for name in nc_dat.ncattrs():
co2usa[city][site]['global_attributes'][name] = getattr(nc_dat, name)
#print("Global attr {} = {}".format(name, getattr(nc_dat, name)))
co2usa[city][site]['attributes'] = {} # Variable attributes
for name in nc_dat.variables.keys():
co2usa[city][site]['attributes'][name] = {}
for attrname in nc_dat.variables[name].ncattrs():
co2usa[city][site]['attributes'][name][attrname] = getattr(nc_dat.variables[name], attrname)
#print("{} -- {}".format(attrname, getattr(nc_dat.variables[name], attrname)))
for name in nc_dat.variables.keys(): # Variable data
co2usa[city][site][name] = nc_dat.variables[name][:].data
# Convert to datetime
co2usa[city][site]['time'] = pd.to_datetime(co2usa[city][site]['time']*1e9)
# Take care of NaNs
co2usa[city][site][species][co2usa[city][site][species]==co2usa[city][site]['attributes'][species]['_FillValue']] = np.nan
# Remove the temporary netCDF variable
del nc_dat
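# Illustrative only: after the loop the data is nested as co2usa[city][site][...]; the
# site key is derived from the file name (e.g. "boston_co2_HF_29m" for the file named
# in the docstring above):
#   co2usa['boston']['boston_co2_HF_29m']['time']  -> pandas DatetimeIndex
#   co2usa['boston']['boston_co2_HF_29m']['co2']   -> numpy array with NaN for fill values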
#%% Plot the CO2 USA data
sites = co2usa[city].keys()
f1 = plt.figure(1); plt.clf(); ax = plt.axes()
plt.title(city+' '+species,fontsize=20)
for site in sites:
if site.find('background') == -1:
plt.plot(co2usa[city][site]['time'],co2usa[city][site][species],label=site)
for site in sites:
if site.find('background') != -1:
plt.plot(co2usa[city][site]['time'],co2usa[city][site][species],'k-',label=site)
ax.set_ylabel(species,fontsize=15)
plt.legend(fontsize=14)
plt.grid(b=True,axis='both')
plt.show()
|
[
"matplotlib.pyplot.title",
"netCDF4.Dataset",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.getcwd",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"pandas.to_datetime",
"glob.glob",
"matplotlib.pyplot.grid"
] |
[((1463, 1474), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1472, 1474), False, 'import os\n'), ((1669, 1723), 'glob.glob', 'glob.glob', (["(read_folder + city + '_' + species + '*.nc')"], {}), "(read_folder + city + '_' + species + '*.nc')\n", (1678, 1723), False, 'import glob\n'), ((3131, 3144), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3141, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3151, 3160), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3158, 3160), True, 'import matplotlib.pyplot as plt\n'), ((3167, 3179), 'matplotlib.pyplot.axes', 'plt.axes', (['f1'], {}), '(f1)\n', (3175, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3224), 'matplotlib.pyplot.title', 'plt.title', (["(city + ' ' + species)"], {'fontsize': '(20)'}), "(city + ' ' + species, fontsize=20)\n", (3189, 3224), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3565), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (3552, 3565), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3595), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'axis': '"""both"""'}), "(b=True, axis='both')\n", (3574, 3595), True, 'import matplotlib.pyplot as plt\n'), ((3596, 3606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3604, 3606), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1832), 'netCDF4.Dataset', 'nc.Dataset', (['all_files[fni]'], {}), '(all_files[fni])\n', (1816, 1832), True, 'import netCDF4 as nc\n'), ((2813, 2870), 'pandas.to_datetime', 'pd.to_datetime', (["(co2usa[city][site]['time'] * 1000000000.0)"], {}), "(co2usa[city][site]['time'] * 1000000000.0)\n", (2827, 2870), True, 'import pandas as pd\n'), ((3285, 3362), 'matplotlib.pyplot.plot', 'plt.plot', (["co2usa[city][site]['time']", 'co2usa[city][site][species]'], {'label': 'site'}), "(co2usa[city][site]['time'], co2usa[city][site][species], label=site)\n", (3293, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3426, 3513), 'matplotlib.pyplot.plot', 'plt.plot', (["co2usa[city][site]['time']", 'co2usa[city][site][species]', '"""k-"""'], {'label': 'site'}), "(co2usa[city][site]['time'], co2usa[city][site][species], 'k-',\n label=site)\n", (3434, 3513), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest import test
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base
from tempest.common import tempest_fixtures as fixtures
CONF = config.CONF
class BgpSpeakerTestJSONBase(base.BaseAdminNetworkTest):
default_bgp_speaker_args = {'local_as': '1234',
'ip_version': 4,
'name': 'my-bgp-speaker',
'advertise_floating_ip_host_routes': True,
'advertise_tenant_networks': True}
default_bgp_peer_args = {'remote_as': '4321',
'name': 'my-bgp-peer',
'peer_ip': '192.168.1.1',
'auth_type': '<PASSWORD>', 'password': '<PASSWORD>'}
@classmethod
def resource_setup(cls):
super(BgpSpeakerTestJSONBase, cls).resource_setup()
if not test.is_extension_enabled('bgp_speaker', 'network'):
msg = "BGP Speaker extension is not enabled."
raise cls.skipException(msg)
cls.ext_net_id = CONF.network.public_network_id
def create_bgp_speaker(self, auto_delete=True, **args):
data = {'bgp_speaker': args}
bgp_speaker = self.admin_client.create_bgp_speaker(data)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
if auto_delete:
self.addCleanup(self.delete_bgp_speaker, bgp_speaker_id)
return bgp_speaker
def create_bgp_peer(self, **args):
bgp_peer = self.admin_client.create_bgp_peer({'bgp_peer': args})
bgp_peer_id = bgp_peer['bgp-peer']['id']
self.addCleanup(self.delete_bgp_peer, bgp_peer_id)
return bgp_peer
def update_bgp_speaker(self, id, **args):
data = {'bgp_speaker': args}
return self.admin_client.update_bgp_speaker(id, data)
def delete_bgp_speaker(self, id):
return self.admin_client.delete_bgp_speaker(id)
def get_bgp_speaker(self, id):
return self.admin_client.get_bgp_speaker(id)
def create_bgp_speaker_and_peer(self):
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
return (bgp_speaker, bgp_peer)
def delete_bgp_peer(self, id):
return self.admin_client.delete_bgp_peer(id)
def add_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
return self.admin_client.add_bgp_peer_with_id(bgp_speaker_id,
bgp_peer_id)
def remove_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
return self.admin_client.remove_bgp_peer_with_id(bgp_speaker_id,
bgp_peer_id)
def delete_address_scope(self, id):
return self.admin_client.delete_address_scope(id)
class BgpSpeakerTestJSON(BgpSpeakerTestJSONBase):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
Create bgp-speaker
Delete bgp-speaker
Create bgp-peer
Update bgp-peer
Delete bgp-peer
"""
@test.idempotent_id('df259771-7104-4ffa-b77f-bd183600d7f9')
def test_delete_bgp_speaker(self):
bgp_speaker = self.create_bgp_speaker(auto_delete=False,
**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.delete_bgp_speaker(bgp_speaker_id)
self.assertRaises(lib_exc.NotFound,
self.get_bgp_speaker,
bgp_speaker_id)
@test.idempotent_id('81d9dc45-19f8-4c6e-88b8-401d965cd1b0')
def test_create_bgp_peer(self):
self.create_bgp_peer(**self.default_bgp_peer_args)
@test.idempotent_id('6ade0319-1ee2-493c-ac4b-5eb230ff3a77')
def test_add_bgp_peer(self):
bgp_speaker, bgp_peer = self.create_bgp_speaker_and_peer()
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
bgp_peer_id = bgp_peer['bgp-peer']['id']
self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
self.assertEqual(1, len(bgp_peers_list))
self.assertTrue(bgp_peer_id in bgp_peers_list)
@test.idempotent_id('f9737708-1d79-440b-8350-779f97d882ee')
def test_remove_bgp_peer(self):
bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
bgp_peer_id = bgp_peer['bgp-peer']['id']
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
self.assertTrue(bgp_peer_id in bgp_peers_list)
bgp_speaker = self.remove_bgp_peer(bgp_speaker_id, bgp_peer_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
self.assertTrue(not bgp_peers_list)
@test.idempotent_id('23c8eb37-d10d-4f43-b2e7-6542cb6a4405')
def test_add_gateway_network(self):
self.useFixture(fixtures.LockFixture('gateway_network_binding'))
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
self.ext_net_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
network_list = bgp_speaker['bgp-speaker']['networks']
self.assertEqual(1, len(network_list))
self.assertTrue(self.ext_net_id in network_list)
@test.idempotent_id('6cfc7137-0d99-4a3d-826c-9d1a3a1767b0')
def test_remove_gateway_network(self):
self.useFixture(fixtures.LockFixture('gateway_network_binding'))
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
self.ext_net_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
networks = bgp_speaker['bgp-speaker']['networks']
self.assertTrue(self.ext_net_id in networks)
self.admin_client.remove_bgp_gateway_network(bgp_speaker_id,
self.ext_net_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
network_list = bgp_speaker['bgp-speaker']['networks']
self.assertTrue(not network_list)
|
[
"tempest.common.tempest_fixtures.LockFixture",
"tempest.test.is_extension_enabled",
"tempest.test.idempotent_id"
] |
[((3813, 3871), 'tempest.test.idempotent_id', 'test.idempotent_id', (['"""df259771-7104-4ffa-b77f-bd183600d7f9"""'], {}), "('df259771-7104-4ffa-b77f-bd183600d7f9')\n", (3831, 3871), False, 'from tempest import test\n'), ((4301, 4359), 'tempest.test.idempotent_id', 'test.idempotent_id', (['"""81d9dc45-19f8-4c6e-88b8-401d965cd1b0"""'], {}), "('81d9dc45-19f8-4c6e-88b8-401d965cd1b0')\n", (4319, 4359), False, 'from tempest import test\n'), ((4461, 4519), 'tempest.test.idempotent_id', 'test.idempotent_id', (['"""6ade0319-1ee2-493c-ac4b-5eb230ff3a77"""'], {}), "('6ade0319-1ee2-493c-ac4b-5eb230ff3a77')\n", (4479, 4519), False, 'from tempest import test\n'), ((5026, 5084), 'tempest.test.idempotent_id', 'test.idempotent_id', (['"""f9737708-1d79-440b-8350-779f97d882ee"""'], {}), "('f9737708-1d79-440b-8350-779f97d882ee')\n", (5044, 5084), False, 'from tempest import test\n'), ((5876, 5934), 'tempest.test.idempotent_id', 'test.idempotent_id', (['"""23c8eb37-d10d-4f43-b2e7-6542cb6a4405"""'], {}), "('23c8eb37-d10d-4f43-b2e7-6542cb6a4405')\n", (5894, 5934), False, 'from tempest import test\n'), ((6563, 6621), 'tempest.test.idempotent_id', 'test.idempotent_id', (['"""6cfc7137-0d99-4a3d-826c-9d1a3a1767b0"""'], {}), "('6cfc7137-0d99-4a3d-826c-9d1a3a1767b0')\n", (6581, 6621), False, 'from tempest import test\n'), ((1574, 1625), 'tempest.test.is_extension_enabled', 'test.is_extension_enabled', (['"""bgp_speaker"""', '"""network"""'], {}), "('bgp_speaker', 'network')\n", (1599, 1625), False, 'from tempest import test\n'), ((5999, 6046), 'tempest.common.tempest_fixtures.LockFixture', 'fixtures.LockFixture', (['"""gateway_network_binding"""'], {}), "('gateway_network_binding')\n", (6019, 6046), True, 'from tempest.common import tempest_fixtures as fixtures\n'), ((6689, 6736), 'tempest.common.tempest_fixtures.LockFixture', 'fixtures.LockFixture', (['"""gateway_network_binding"""'], {}), "('gateway_network_binding')\n", (6709, 6736), True, 'from tempest.common import tempest_fixtures as fixtures\n')]
|
from PIL import Image, ImageDraw, ImageFont
class ImageAnnotator(object):
def __init__(self,
img_path: str,
font: str = None,
font_size: int = 5):
assert isinstance(img_path, str)
self._img = Image.open(img_path).convert('RGBA')
self._img_draw = ImageDraw.Draw(self._img)
if font is None:
self._font = None
else:
assert isinstance(font, str)
self._font = ImageFont.truetype(font, font_size)
def image(self):
return self._img.convert('RGB')
def save(self, img_path):
self._img.convert('RGB').save(img_path)
def draw_line(self,
points: list,
fill: str = None,
width: int = 1):
"""
Draw a line on image
"""
assert isinstance(points, (list, tuple)) and len(points) == 2
for pair in points:
assert isinstance(pair, tuple) and len(pair) == 2
self._img_draw.line(points, fill, width)
def draw_rectangle(self,
points: list,
outline: str = None,
width: int = 1,
text: str = None,
text_fill: str = None):
"""
Draw detection bounding box with text
"""
assert isinstance(points, (list, tuple))
assert len(points) == 2 or len(points) == 4
for pair in points:
assert len(pair) == 2
if len(points) == 4:
points = [points[0], points[2]]
self._img_draw.rectangle(points, outline=outline, width=width)
if text is not None:
assert isinstance(text, str)
text_points = (points[0][0], points[1][1])
self.draw_text(points=text_points,
text=text,
fill=text_fill)
def draw_polygon(self,
points: list,
outline: str = None,
width: int = 1,
text: str = None,
text_fill: str = None):
"""
Draw polygon with text
"""
assert isinstance(points, (tuple, list)) and len(points) > 2
for pair in points:
assert isinstance(pair, tuple) and len(pair) == 2
for i in range(len(points)):
line_pts = (points[i], points[(i+1) % len(points)])
self.draw_line(points=line_pts,
fill=outline,
width=width
)
if text is not None:
assert isinstance(text, str)
self.draw_text(points=points[0],
text=text,
fill=text_fill)
def draw_text(self,
points: tuple,
text: str,
fill: str = None,
):
"""
Draw text on image
"""
assert isinstance(points, tuple) and len(points) == 2
self._img_draw.text(points, text, font=self._font, fill=fill)
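# Minimal usage sketch (illustrative only): 'sample.jpg' and the coordinates
# below are hypothetical placeholders, not part of the class above.
if __name__ == '__main__':
    annotator = ImageAnnotator('sample.jpg')
    # Draw a labelled bounding box from (10, 10) to (100, 80), then save a copy.
    annotator.draw_rectangle(points=[(10, 10), (100, 80)],
                             outline='red',
                             width=2,
                             text='object',
                             text_fill='red')
    annotator.save('sample_annotated.jpg')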
|
[
"PIL.ImageDraw.Draw",
"PIL.ImageFont.truetype",
"PIL.Image.open"
] |
[((328, 353), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self._img'], {}), '(self._img)\n', (342, 353), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((490, 525), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font', 'font_size'], {}), '(font, font_size)\n', (508, 525), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((266, 286), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (276, 286), False, 'from PIL import Image, ImageDraw, ImageFont\n')]
|
import pytest
def test_fx_cycle(argv, GPIO):
"""Test that set_sequence supports the output of a PlasmaFX Sequence"""
from plasma import auto
from plasma.apa102 import PlasmaAPA102
from plasmafx import Sequence
from plasmafx.plugins import FXCycle
sequence = Sequence(10)
sequence.set_plugin(0, FXCycle())
plasma = auto("APA102:14:15:pixel_count=10")
plasma.set_sequence(sequence.get_pixels())
assert isinstance(plasma, PlasmaAPA102)
|
[
"plasmafx.Sequence",
"plasmafx.plugins.FXCycle",
"plasma.auto"
] |
[((285, 297), 'plasmafx.Sequence', 'Sequence', (['(10)'], {}), '(10)\n', (293, 297), False, 'from plasmafx import Sequence\n'), ((350, 385), 'plasma.auto', 'auto', (['"""APA102:14:15:pixel_count=10"""'], {}), "('APA102:14:15:pixel_count=10')\n", (354, 385), False, 'from plasma import auto\n'), ((325, 334), 'plasmafx.plugins.FXCycle', 'FXCycle', ([], {}), '()\n', (332, 334), False, 'from plasmafx.plugins import FXCycle\n')]
|
from rest_framework import serializers
from meeting.models import Meeting
class BaseSerializer(serializers.Serializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class NewMeetingIn(BaseSerializer):
name = serializers.CharField(max_length=128)
begin_at = serializers.DateTimeField()
end_at = serializers.DateTimeField()
mute_type = serializers.ChoiceField(choices=Meeting.MuteType.choices, required=False)
password = serializers.CharField(required=False)
class NewMeetingOut(serializers.ModelSerializer):
number = serializers.SerializerMethodField()
class Meta:
model = Meeting
fields = ('number', 'created')
def get_number(self, obj):
return obj.call_number
class BaseMeetingOut(BaseSerializer):
success = serializers.BooleanField()
class MeetingInfoIn(BaseSerializer):
number = serializers.IntegerField(help_text='Call number of meeting')
class MeetingInfoOut(serializers.ModelSerializer):
owner_id = serializers.SerializerMethodField()
owner_name = serializers.SerializerMethodField()
number = serializers.SerializerMethodField()
class Meta:
model = Meeting
fields = ('name', 'number', 'password', 'owner_name', 'owner_id', 'status', 'begin_at', 'end_at')
def get_owner_name(self, obj):
if obj.owner is None:
return None
return obj.owner.username
def get_owner_id(self, obj):
if obj.owner is None:
return None
return obj.owner.id
def get_number(self, obj):
return obj.call_number
class MeetingListIn(BaseSerializer):
beginAt = serializers.DateTimeField(required=False, help_text='Time with zone, etc: 2021-08-12T07:56:41+08:00')
endAt = serializers.DateTimeField(required=False)
class DelMeetingIn(BaseSerializer):
meetings = serializers.ListField(child=serializers.IntegerField(), help_text="list of meeting' number to delete")
class JoinMeetingIn(BaseSerializer):
number = serializers.IntegerField()
password = serializers.CharField(required=False)
class MeetingIn(BaseSerializer):
number = serializers.IntegerField()
class JoinMeetingOut(BaseSerializer):
token = serializers.CharField()
app_key = serializers.CharField()
room_id = serializers.IntegerField()
share_user_id = serializers.IntegerField()
share_user_token = serializers.CharField()
is_breakout = serializers.BooleanField(default=False)
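# Minimal usage sketch (hypothetical payload) of validating a join request with
# JoinMeetingIn before acting on it:
#   serializer = JoinMeetingIn(data={'number': 123456, 'password': 's3cret'})
#   serializer.is_valid(raise_exception=True)
#   number = serializer.validated_data['number']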
|
[
"rest_framework.serializers.ChoiceField",
"rest_framework.serializers.SerializerMethodField",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.CharField",
"rest_framework.serializers.BooleanField",
"rest_framework.serializers.DateTimeField"
] |
[((285, 322), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (306, 322), False, 'from rest_framework import serializers\n'), ((338, 365), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (363, 365), False, 'from rest_framework import serializers\n'), ((379, 406), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (404, 406), False, 'from rest_framework import serializers\n'), ((423, 496), 'rest_framework.serializers.ChoiceField', 'serializers.ChoiceField', ([], {'choices': 'Meeting.MuteType.choices', 'required': '(False)'}), '(choices=Meeting.MuteType.choices, required=False)\n', (446, 496), False, 'from rest_framework import serializers\n'), ((512, 549), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)'}), '(required=False)\n', (533, 549), False, 'from rest_framework import serializers\n'), ((615, 650), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (648, 650), False, 'from rest_framework import serializers\n'), ((848, 874), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {}), '()\n', (872, 874), False, 'from rest_framework import serializers\n'), ((927, 987), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'help_text': '"""Call number of meeting"""'}), "(help_text='Call number of meeting')\n", (951, 987), False, 'from rest_framework import serializers\n'), ((1056, 1091), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1089, 1091), False, 'from rest_framework import serializers\n'), ((1109, 1144), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1142, 1144), False, 'from rest_framework import serializers\n'), ((1158, 1193), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1191, 1193), False, 'from rest_framework import serializers\n'), ((1697, 1803), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'required': '(False)', 'help_text': '"""Time with zone, etc: 2021-08-12T07:56:41+08:00"""'}), "(required=False, help_text=\n 'Time with zone, etc: 2021-08-12T07:56:41+08:00')\n", (1722, 1803), False, 'from rest_framework import serializers\n'), ((1811, 1852), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'required': '(False)'}), '(required=False)\n', (1836, 1852), False, 'from rest_framework import serializers\n'), ((2061, 2087), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2085, 2087), False, 'from rest_framework import serializers\n'), ((2103, 2140), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)'}), '(required=False)\n', (2124, 2140), False, 'from rest_framework import serializers\n'), ((2189, 2215), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2213, 2215), False, 'from rest_framework import serializers\n'), ((2268, 2291), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2289, 2291), False, 'from rest_framework import serializers\n'), ((2306, 2329), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2327, 2329), False, 'from rest_framework import 
serializers\n'), ((2344, 2370), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2368, 2370), False, 'from rest_framework import serializers\n'), ((2391, 2417), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2415, 2417), False, 'from rest_framework import serializers\n'), ((2441, 2464), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2462, 2464), False, 'from rest_framework import serializers\n'), ((2483, 2522), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2507, 2522), False, 'from rest_framework import serializers\n'), ((1934, 1960), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (1958, 1960), False, 'from rest_framework import serializers\n')]
|
import string
import random
import hashlib
import os
# import requests
import urllib
from typing import Union
import time
def steady_download_and_compute_hash(url: str, algorithm: str, target_path: str) -> str:
remote = urllib.request.urlopen(url)
str0 = ''.join(random.sample(string.ascii_lowercase, 8))
path_tmp = target_path + '.tmp.' + str0
hh = getattr(hashlib, algorithm)()
with open(path_tmp, 'wb') as f:
while True:
chunk = remote.read(4096)
if not chunk:
break
hh.update(chunk)
f.write(chunk)
os.rename(path_tmp, target_path)
hash0 = hh.hexdigest()
return hash0
## somehow this was not always working -- some bits were wrong for large files!
def old_steady_download_and_compute_hash(url: str, algorithm: str, target_path: str, chunk_size: int=1024 * 1024 * 40) -> str:
response = requests.head(url)
size_bytes = int(response.headers['content-length'])
str0 = ''.join(random.sample(string.ascii_lowercase, 8))
path_tmp = target_path + '.tmp.' + str0
try:
hh = getattr(hashlib, algorithm)()
with open(path_tmp, 'wb') as f:
for ii in range(0, size_bytes, chunk_size):
jj = ii + chunk_size
if jj > size_bytes:
jj = size_bytes
headers = {
'Range': 'bytes={}-{}'.format(ii, jj - 1)
}
response = requests.get(url, headers=headers, stream=True)
for chunk in response.iter_content(chunk_size=5120):
if chunk: # filter out keep-alive new chunks
hh.update(chunk)
f.write(chunk)
os.rename(path_tmp, target_path)
hash0 = hh.hexdigest()
return hash0
except:
if os.path.exists(path_tmp):
os.remove(path_tmp)
raise
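# Minimal usage sketch (hypothetical URL and target path):
#   digest = steady_download_and_compute_hash(
#       'https://example.org/archive.bin', 'sha1', '/tmp/archive.bin')
#   print(digest)  # hex digest of the downloaded bytes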
|
[
"os.remove",
"random.sample",
"os.rename",
"os.path.exists",
"urllib.request.urlopen"
] |
[((225, 252), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (247, 252), False, 'import urllib\n'), ((600, 632), 'os.rename', 'os.rename', (['path_tmp', 'target_path'], {}), '(path_tmp, target_path)\n', (609, 632), False, 'import os\n'), ((272, 312), 'random.sample', 'random.sample', (['string.ascii_lowercase', '(8)'], {}), '(string.ascii_lowercase, 8)\n', (285, 312), False, 'import random\n'), ((995, 1035), 'random.sample', 'random.sample', (['string.ascii_lowercase', '(8)'], {}), '(string.ascii_lowercase, 8)\n', (1008, 1035), False, 'import random\n'), ((1744, 1776), 'os.rename', 'os.rename', (['path_tmp', 'target_path'], {}), '(path_tmp, target_path)\n', (1753, 1776), False, 'import os\n'), ((1852, 1876), 'os.path.exists', 'os.path.exists', (['path_tmp'], {}), '(path_tmp)\n', (1866, 1876), False, 'import os\n'), ((1890, 1909), 'os.remove', 'os.remove', (['path_tmp'], {}), '(path_tmp)\n', (1899, 1909), False, 'import os\n')]
|
import sys
sys.path.insert(0, r'C:\Users\Brooks\github\splitr')# access library code from outside /models
# library functions:
import torch
import time
import pandas as pd
# Splitr modules:
import model_utils
# this is really a constructor for a bidirectional LSTM but i figured
# BD_LSTM was only 2 letters off of BDSM so why not
class BDSM(torch.nn.Module):
def __init__(self, num_inputs, num_hidden_layers,char_in, char_out, layer_count=1):
super(BDSM, self).__init__()
self.char_out = char_out
# make the last layer not have a linear layer inside
self.rnn = torch.nn.LSTM(num_inputs, num_hidden_layers, num_layers=layer_count, bidirectional=True, batch_first=True)
self.linear = torch.nn.Linear(char_in, char_out)
self.relu = torch.nn.ReLU()
def forward(self, x):
# print('>>starting rnn that has output chars of', x.shape)
rnn_output, _ = self.rnn(x)
# print('raw rnn out', rnn_output.shape)
batch, char_count, depth = rnn_output.shape
rnn_output = rnn_output.contiguous().view(batch*depth, char_count)
# print('reshaped rnn out', rnn_output.shape)
linear = self.linear(rnn_output)
output = linear.view(batch, self.char_out, depth)
# print('after linear shape', output.shape)
output =self.relu(output)
return output
# Convolution cell with adjustable activation / maxpool size / batchnorm
class CNN_cell(torch.nn.Module):
def __init__(self,in_channels=False,out_channels=False,kernel_size=3,activation=False, pool_shape=False, pool_stride=False, batchnorm=False):
super(CNN_cell, self).__init__()
_layers = []
if in_channels and out_channels:
_layers.append(torch.nn.Conv2d(in_channels, out_channels,kernel_size))
if activation:
_layers.append(self.find_activation(activation))
if batchnorm:
_layers.append(torch.nn.BatchNorm2d(batchnorm))
if pool_shape and pool_stride:
_layers.append(torch.nn.MaxPool2d(pool_shape, pool_stride))
self.cnn = torch.nn.Sequential(*_layers)
def find_activation(self, activation):
if activation == 'relu':
return torch.nn.ReLU()
elif activation == 'tanh':
return torch.nn.Tanh()
elif activation == 'leaky':
return torch.nn.LeakyReLU()
else:
print('activation function call |%s| is not configured' % activation )
def forward(self, input_tensor):
output = self.cnn(input_tensor)
return output
# https://arxiv.org/pdf/1507.05717.pdf
class model(torch.nn.Module):
def __init__(self, channel_count=1,num_hidden= 256, unique_char_count=57,rnn_layer_stack=1):
super(model, self).__init__()
# add dropout to cnn layers
self.softmax = torch.nn.LogSoftmax(dim=2)
# CONVOLUTIONS
_cnn_layer = []
_cnn_layer.append(CNN_cell(in_channels=1, out_channels=64, kernel_size=3, activation='relu', pool_shape=False, pool_stride=False))
_cnn_layer.append(CNN_cell(in_channels=64 , out_channels=128, kernel_size=3, activation='relu', pool_shape=(2,2), pool_stride=2))
_cnn_layer.append(CNN_cell(in_channels=128, out_channels=256, kernel_size=3, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=256, out_channels=512, kernel_size=3, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=3, activation='relu', pool_shape=(1,2), pool_stride=2))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu', batchnorm=512))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
# RNN LAYERS
_bdsm_layer = []# 2048
# _bdsm_layer.append(BDSM(num_inputs=512, num_hidden_layers=num_hidden, char_in=56,char_out=56, layer_count=rnn_layer_stack))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden, char_in=85,char_out=140, layer_count=1))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= 140, char_out=190, layer_count=1))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= 190, char_out=250, layer_count=1))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= 250, char_out=350, layer_count=1))
inc = 1.26
max_len = 80
current = 53
p = 0
while current < max_len:
p+=1
prev = current
current = int(inc * prev)
print(prev, current)
_bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= prev, char_out=current, layer_count=1))
print('number of rnns stacked %s' % p)
# CHAR activations (transcription)
self.linear = torch.nn.Sequential(
torch.nn.Linear(in_features=num_hidden*2, out_features=unique_char_count),torch.nn.ReLU())
self.cnn = torch.nn.Sequential(*_cnn_layer)
self.rnn = torch.nn.Sequential(*_bdsm_layer)
def forward(self, x):
t = self.cnn(x)
batch, depth, height, base = t.shape
# print('raw cnn shape: ', t.shape)
# import sys
# cnn_output = t.view(batch, height, depth*base)
cnn_output = t.view(batch, base, height*depth)
# print(' NEW after reshape', cnn_output.shape, type(cnn_output))
# sys.exit('exits')
rnn_output = self.rnn(cnn_output)
batch, char_len, depth = rnn_output.shape
rnn_output = rnn_output.contiguous().view(batch*char_len, depth)
# print('rnn output ', rnn_output.shape)
output = self.linear(rnn_output).view(batch, char_len, -1)
output = self.softmax(output)
return output
|
[
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.LogSoftmax",
"torch.nn.Tanh",
"torch.nn.Conv2d",
"sys.path.insert",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.LeakyReLU",
"torch.nn.LSTM"
] |
[((11, 66), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""C:\\\\Users\\\\Brooks\\\\github\\\\splitr"""'], {}), "(0, 'C:\\\\Users\\\\Brooks\\\\github\\\\splitr')\n", (26, 66), False, 'import sys\n'), ((577, 687), 'torch.nn.LSTM', 'torch.nn.LSTM', (['num_inputs', 'num_hidden_layers'], {'num_layers': 'layer_count', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(num_inputs, num_hidden_layers, num_layers=layer_count,\n bidirectional=True, batch_first=True)\n', (590, 687), False, 'import torch\n'), ((700, 734), 'torch.nn.Linear', 'torch.nn.Linear', (['char_in', 'char_out'], {}), '(char_in, char_out)\n', (715, 734), False, 'import torch\n'), ((749, 764), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (762, 764), False, 'import torch\n'), ((1922, 1951), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*_layers'], {}), '(*_layers)\n', (1941, 1951), False, 'import torch\n'), ((2572, 2598), 'torch.nn.LogSoftmax', 'torch.nn.LogSoftmax', ([], {'dim': '(2)'}), '(dim=2)\n', (2591, 2598), False, 'import torch\n'), ((4937, 4969), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*_cnn_layer'], {}), '(*_cnn_layer)\n', (4956, 4969), False, 'import torch\n'), ((4983, 5016), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*_bdsm_layer'], {}), '(*_bdsm_layer)\n', (5002, 5016), False, 'import torch\n'), ((2030, 2045), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2043, 2045), False, 'import torch\n'), ((4832, 4907), 'torch.nn.Linear', 'torch.nn.Linear', ([], {'in_features': '(num_hidden * 2)', 'out_features': 'unique_char_count'}), '(in_features=num_hidden * 2, out_features=unique_char_count)\n', (4847, 4907), False, 'import torch\n'), ((4906, 4921), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (4919, 4921), False, 'import torch\n'), ((1620, 1675), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {}), '(in_channels, out_channels, kernel_size)\n', (1635, 1675), False, 'import torch\n'), ((1779, 1810), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['batchnorm'], {}), '(batchnorm)\n', (1799, 1810), False, 'import torch\n'), ((1863, 1906), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['pool_shape', 'pool_stride'], {}), '(pool_shape, pool_stride)\n', (1881, 1906), False, 'import torch\n'), ((2085, 2100), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (2098, 2100), False, 'import torch\n'), ((2141, 2161), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (2159, 2161), False, 'import torch\n')]
|
# Modified from SUTD and https://github.com/bentrevett/pytorch-sentiment-analysis
# Sentiment Analysis on IMDB with a bidirectional LSTM
# We're using packed sequences for training
# For more info: https://stackoverflow.com/questions/51030782/why-do-we-pack-the-sequences-in-pytorch
import torch.nn as nn
import torchtext
import torch
from torchtext.legacy import data
from torchtext.legacy import datasets
import torch.optim as optim
import random
import time
# MODEL ========================================================================
class BidirectionalLSTM(nn.Module):
def __init__(self,
input_dim,
embedding_dim=100,
hidden_dim=256,
output_dim=1,
n_layers=2,
bidirectional=True,
dropout=0.5,
pad_idx=0):
super().__init__()
self.embedding = nn.Embedding(input_dim,
embedding_dim,
padding_idx=pad_idx)
self.rnn = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout)
self.fc = nn.Linear(hidden_dim * 2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text, text_lengths):
embedded = self.dropout(self.embedding(text)) # Map text to embedding
# Pack sequence
# Note: We move text_lengths to cpu due to a small bug
# https://github.com/pytorch/pytorch/issues/43227
packed_embedded = nn.utils.rnn.pack_padded_sequence(
embedded, text_lengths.cpu()
)
packed_output, (hidden, cell) = self.rnn(packed_embedded) # Feedforward
# Unpack sequence
output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)
hidden = self.dropout(torch.cat((hidden[-2,:,:],
hidden[-1,:,:]),
dim = 1))
return self.fc(hidden)
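# Toy illustration (assumed shapes) of the pack/unpack round trip used in
# forward() above; packing lets the LSTM skip padded timesteps entirely:
#   embedded = torch.randn(7, 2, 100)          # (seq_len, batch, emb_dim)
#   lengths = torch.tensor([7, 4])             # true lengths, longest first
#   packed = nn.utils.rnn.pack_padded_sequence(embedded, lengths)
#   out, (h, c) = nn.LSTM(100, 16)(packed)
#   unpacked, out_lens = nn.utils.rnn.pad_packed_sequence(out)
#   # unpacked is (7, 2, 16); positions past each sequence's length stay zero.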
# TRAINING UTILITIES ===========================================================
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
# Round predictions to the closest integer
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum() / len(correct)
return acc
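# Example: binary_accuracy(torch.tensor([2.0, -1.0]), torch.tensor([1.0, 0.0]))
# sigmoid gives ~[0.88, 0.27], rounding gives [1.0, 0.0], so the accuracy is 1.0.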
def train(model, iterator, optimizer, criterion):
epoch_loss, epoch_acc = 0, 0
model.train() # Set to training mode
for batch in iterator:
optimizer.zero_grad()
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion):
epoch_loss, epoch_acc = 0, 0
model.eval() # Set to evaluation mode
with torch.no_grad(): # Don't track gradients
for batch in iterator:
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
if __name__ == "__main__":
# MAKE DETERMINISTIC =======================================================
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# LOAD DATA ================================================================
# Spacy is good for tokenisation in other languages
TEXT = data.Field(tokenize = 'spacy', include_lengths = True)
LABEL = data.LabelField(dtype = torch.float)
# If slow, use this instead:
# def tokenize(s):
# return s.split(' ')
# TEXT = data.Field(tokenize=tokenize, include_lengths = True)
# Test-valid-train split
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
# Visualise
example = next(iter(test_data))
example.label
example.text
# Note: Using glove embeddings (~900mb)
TEXT.build_vocab(
test_data,
max_size = 25000,
vectors = "glove.6B.100d",
unk_init = torch.Tensor.normal_ # how to initialize unseen words not in glove
)
LABEL.build_vocab(test_data)
# Data iterators
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = 64,
sort_within_batch = True,
device = device)
# MODEL ====================================================================
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token] # Specifies index when word is missing
model = BidirectionalLSTM(input_dim=len(TEXT.vocab),
embedding_dim=100,
hidden_dim=256,
output_dim=1,
n_layers=2, # To make LSTM deep
bidirectional=True,
dropout=0.5,
pad_idx=PAD_IDX)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
count_parameters(model) # 4,810,857 (wow!)
# Copy embeddings to model
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
# Zero out <UNK> and <PAD> tokens
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
    EMBEDDING_DIM = 100  # matches the embedding_dim used to build the model above
    model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
# TRAIN ====================================================================
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'tut2-model.pt')
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# WHEN DONE.. ==============================================================
model.load_state_dict(torch.load('tut2-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
# TRY WITH USER INPUT ======================================================
import spacy
nlp = spacy.load('en')
def predict_sentiment(model, sentence):
model.eval()
tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
indexed = [TEXT.vocab.stoi[t] for t in tokenized]
length = [len(indexed)]
tensor = torch.LongTensor(indexed).to(device)
tensor = tensor.unsqueeze(1)
length_tensor = torch.LongTensor(length)
prediction = torch.sigmoid(model(tensor, length_tensor))
return prediction.item()
predict_sentiment(model, "This film is great")
|
[
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.no_grad",
"torch.load",
"spacy.load",
"random.seed",
"torch.nn.Linear",
"torch.zeros",
"torchtext.legacy.datasets.IMDB.splits",
"torch.nn.LSTM",
"torch.nn.BCEWithLogitsLoss",
"torchtext.legacy.data.LabelField",
"torch.manual_seed",
"torchtext.legacy.data.BucketIterator.splits",
"torch.cuda.is_available",
"torch.LongTensor",
"time.time",
"torch.sigmoid",
"torchtext.legacy.data.Field"
] |
[((4070, 4093), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (4087, 4093), False, 'import torch\n'), ((4289, 4339), 'torchtext.legacy.data.Field', 'data.Field', ([], {'tokenize': '"""spacy"""', 'include_lengths': '(True)'}), "(tokenize='spacy', include_lengths=True)\n", (4299, 4339), False, 'from torchtext.legacy import data\n'), ((4356, 4390), 'torchtext.legacy.data.LabelField', 'data.LabelField', ([], {'dtype': 'torch.float'}), '(dtype=torch.float)\n', (4371, 4390), False, 'from torchtext.legacy import data\n'), ((4605, 4638), 'torchtext.legacy.datasets.IMDB.splits', 'datasets.IMDB.splits', (['TEXT', 'LABEL'], {}), '(TEXT, LABEL)\n', (4625, 4638), False, 'from torchtext.legacy import datasets\n'), ((5228, 5350), 'torchtext.legacy.data.BucketIterator.splits', 'data.BucketIterator.splits', (['(train_data, valid_data, test_data)'], {'batch_size': '(64)', 'sort_within_batch': '(True)', 'device': 'device'}), '((train_data, valid_data, test_data), batch_size=\n 64, sort_within_batch=True, device=device)\n', (5254, 5350), False, 'from torchtext.legacy import data\n'), ((6380, 6406), 'torch.zeros', 'torch.zeros', (['EMBEDDING_DIM'], {}), '(EMBEDDING_DIM)\n', (6391, 6406), False, 'import torch\n'), ((6450, 6476), 'torch.zeros', 'torch.zeros', (['EMBEDDING_DIM'], {}), '(EMBEDDING_DIM)\n', (6461, 6476), False, 'import torch\n'), ((6622, 6644), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (6642, 6644), True, 'import torch.nn as nn\n'), ((7871, 7887), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (7881, 7887), False, 'import spacy\n'), ((903, 962), 'torch.nn.Embedding', 'nn.Embedding', (['input_dim', 'embedding_dim'], {'padding_idx': 'pad_idx'}), '(input_dim, embedding_dim, padding_idx=pad_idx)\n', (915, 962), True, 'import torch.nn as nn\n'), ((1059, 1165), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim'], {'num_layers': 'n_layers', 'bidirectional': 'bidirectional', 'dropout': 'dropout'}), '(embedding_dim, hidden_dim, num_layers=n_layers, bidirectional=\n bidirectional, dropout=dropout)\n', (1066, 1165), True, 'import torch.nn as nn\n'), ((1288, 1325), 'torch.nn.Linear', 'nn.Linear', (['(hidden_dim * 2)', 'output_dim'], {}), '(hidden_dim * 2, output_dim)\n', (1297, 1325), True, 'import torch.nn as nn\n'), ((1349, 1368), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1359, 1368), True, 'import torch.nn as nn\n'), ((1890, 1937), 'torch.nn.utils.rnn.pad_packed_sequence', 'nn.utils.rnn.pad_packed_sequence', (['packed_output'], {}), '(packed_output)\n', (1922, 1937), True, 'import torch.nn as nn\n'), ((2429, 2449), 'torch.sigmoid', 'torch.sigmoid', (['preds'], {}), '(preds)\n', (2442, 2449), False, 'import torch\n'), ((3286, 3301), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3299, 3301), False, 'import torch\n'), ((6822, 6833), 'time.time', 'time.time', ([], {}), '()\n', (6831, 6833), False, 'import time\n'), ((7013, 7024), 'time.time', 'time.time', ([], {}), '()\n', (7022, 7024), False, 'import time\n'), ((7591, 7618), 'torch.load', 'torch.load', (['"""tut2-model.pt"""'], {}), "('tut2-model.pt')\n", (7601, 7618), False, 'import torch\n'), ((8225, 8249), 'torch.LongTensor', 'torch.LongTensor', (['length'], {}), '(length)\n', (8241, 8249), False, 'import torch\n'), ((1969, 2023), 'torch.cat', 'torch.cat', (['(hidden[-2, :, :], hidden[-1, :, :])'], {'dim': '(1)'}), '((hidden[-2, :, :], hidden[-1, :, :]), dim=1)\n', (1978, 2023), False, 'import torch\n'), ((4700, 4717), 'random.seed', 'random.seed', 
(['SEED'], {}), '(SEED)\n', (4711, 4717), False, 'import random\n'), ((5137, 5162), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5160, 5162), False, 'import torch\n'), ((8127, 8152), 'torch.LongTensor', 'torch.LongTensor', (['indexed'], {}), '(indexed)\n', (8143, 8152), False, 'import torch\n')]
|
##################################################
## Notify VMDK export request
##################################################
import os
import boto3
from botocore.exceptions import ClientError
import json
import logging
def lambda_handler(event, context):
# set logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# print the event details
logger.debug(json.dumps(event, indent=2))
# get state machine arn from env vars
state_machine_arn = os.environ['STATE_MACHINE_ARN']
image_build_version_arn = event["Records"][0]["Sns"]["Message"]
stepfunctions_client = boto3.client('stepfunctions')
response = stepfunctions_client.list_executions(
stateMachineArn=state_machine_arn,
statusFilter='RUNNING',
maxResults=1000
)
if len(response['executions']) > 0:
return image_build_version_arn
response = stepfunctions_client.start_execution(
stateMachineArn=state_machine_arn,
input="{\"image_build_version_arn\" : \"" + image_build_version_arn + "\"}"
)
return image_build_version_arn
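# Shape of the SNS event consumed above (ARN value is a hypothetical example):
#   {"Records": [{"Sns": {"Message":
#       "arn:aws:imagebuilder:us-east-1:123456789012:image/my-image/1.0.0/1"}}]}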
|
[
"json.dumps",
"logging.getLogger",
"boto3.client"
] |
[((295, 314), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (312, 314), False, 'import logging\n'), ((623, 652), 'boto3.client', 'boto3.client', (['"""stepfunctions"""'], {}), "('stepfunctions')\n", (635, 652), False, 'import boto3\n'), ((398, 425), 'json.dumps', 'json.dumps', (['event'], {'indent': '(2)'}), '(event, indent=2)\n', (408, 425), False, 'import json\n')]
|
from lensesio.core.endpoints import getEndpoints
from lensesio.core.exec_action import exec_request
class Policy:
def __init__(self, verify_cert=True):
getEndpoints.__init__(self, "policyEndpoints")
self.verify_cert=verify_cert
self.lenses_policies_endpoint = self.url + self.lensesPoliciesEndpoint
self.policy_headers = {
'Content-Type': 'application/json',
'Accept': 'text/plain application/json',
'x-kafka-lenses-token': self.token}
def ViewPolicy(self):
self.viewPolicy = exec_request(
__METHOD="get",
__EXPECTED="json",
__URL=self.lenses_policies_endpoint,
__HEADERS=self.policy_headers,
__VERIFY=self.verify_cert
)
return self.viewPolicy
def SetPolicy(self, name, obfuscation, impactType, category, fields):
if type(fields) is not list:
fields = [fields]
params = dict(
name=name,
obfuscation=obfuscation,
impactType=impactType,
category=category,
fields=fields
)
self.setPolicy = exec_request(
__METHOD="post",
__EXPECTED="text",
__URL=self.lenses_policies_endpoint,
__HEADERS=self.policy_headers,
__DATA=params,
__VERIFY=self.verify_cert
)
return self.setPolicy
def DelPolicy(self, name):
policies = self.ViewPolicy()
for e in policies:
if e['name'] == name:
policy_id = e['id']
break
else:
policy_id = None
if policy_id:
_REQ = self.lenses_policies_endpoint + '/' + policy_id
self.delPolicy = exec_request(
__METHOD="delete",
__EXPECTED="text",
__URL=_REQ,
__HEADERS=self.policy_headers,
__VERIFY=self.verify_cert
)
else:
return "No policy with name %s" % name
return self.delPolicy
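# Minimal usage sketch (hypothetical policy values); every call goes through
# exec_request against the Lenses policies endpoint:
#   policy = Policy(verify_cert=False)
#   policy.SetPolicy(name='mask-emails', obfuscation='All', impactType='HIGH',
#                    category='PII', fields=['email'])
#   policy.ViewPolicy()
#   policy.DelPolicy('mask-emails')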
|
[
"lensesio.core.exec_action.exec_request",
"lensesio.core.endpoints.getEndpoints.__init__"
] |
[((166, 212), 'lensesio.core.endpoints.getEndpoints.__init__', 'getEndpoints.__init__', (['self', '"""policyEndpoints"""'], {}), "(self, 'policyEndpoints')\n", (187, 212), False, 'from lensesio.core.endpoints import getEndpoints\n'), ((564, 716), 'lensesio.core.exec_action.exec_request', 'exec_request', ([], {'__METHOD': '"""get"""', '__EXPECTED': '"""json"""', '__URL': 'self.lenses_policies_endpoint', '__HEADERS': 'self.policy_headers', '__VERIFY': 'self.verify_cert'}), "(__METHOD='get', __EXPECTED='json', __URL=self.\n lenses_policies_endpoint, __HEADERS=self.policy_headers, __VERIFY=self.\n verify_cert)\n", (576, 716), False, 'from lensesio.core.exec_action import exec_request\n'), ((1162, 1329), 'lensesio.core.exec_action.exec_request', 'exec_request', ([], {'__METHOD': '"""post"""', '__EXPECTED': '"""text"""', '__URL': 'self.lenses_policies_endpoint', '__HEADERS': 'self.policy_headers', '__DATA': 'params', '__VERIFY': 'self.verify_cert'}), "(__METHOD='post', __EXPECTED='text', __URL=self.\n lenses_policies_endpoint, __HEADERS=self.policy_headers, __DATA=params,\n __VERIFY=self.verify_cert)\n", (1174, 1329), False, 'from lensesio.core.exec_action import exec_request\n'), ((1784, 1909), 'lensesio.core.exec_action.exec_request', 'exec_request', ([], {'__METHOD': '"""delete"""', '__EXPECTED': '"""text"""', '__URL': '_REQ', '__HEADERS': 'self.policy_headers', '__VERIFY': 'self.verify_cert'}), "(__METHOD='delete', __EXPECTED='text', __URL=_REQ, __HEADERS=\n self.policy_headers, __VERIFY=self.verify_cert)\n", (1796, 1909), False, 'from lensesio.core.exec_action import exec_request\n')]
|
from setuptools import find_packages, setup
with open("README.md") as readme_file:
readme = readme_file.read()
with open("requirements.txt") as reqs_file:
requirements = reqs_file.read().split("\n")
setup(
name="braincode",
version="0.1.0",
description="an investigation of computer program representations.",
long_description=readme,
author="anonymous1 anonymous1",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(where="braincode"),
install_requires=requirements,
python_requires=">=3.7",
)
|
[
"setuptools.find_packages"
] |
[((458, 490), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""braincode"""'}), "(where='braincode')\n", (471, 490), False, 'from setuptools import find_packages, setup\n')]
|
# Data Worker
# %%
import os
import pandas as pd
import plotly.express as px
from pypinyin import lazy_pinyin
locations_url = 'https://blog.csdn.net/envbox/article/details/80290103'
filename = 'locations.json'
sync_folder = os.environ.get('Sync', '.')
mapbox = dict(
mapbox_accesstoken=open(os.path.join(
os.environ['onedrive'], '.mapbox_token')).read(),
mapbox_style='light'
)
def fetch_locations():
locations = pd.read_html(locations_url)[0]
locations.columns = ['Province', 'ID', 'Name',
'Latitude', 'Longitude', 'Height']
    # Fix known issue: heights greater than 10000 carry a spurious 10000 offset,
    # so reduce them with height % 10000
    locations.Height = locations.Height.map(lambda e: e % 10000)
def translate(s):
return ''.join(lazy_pinyin(s))
locations['_province'] = locations['Province'].map(translate)
locations['_name'] = locations['Name'].map(translate)
locations = locations[['ID', 'Province', 'Name',
'Latitude', 'Longitude', 'Height',
'_province', '_name']]
return locations
class DataWorker(object):
def __init__(self):
self.locations = fetch_locations()
self.columns = self.locations.columns
self.plot_mapbox(self.locations.copy())
def search_by_pinyin(self, py):
found = dict()
if py.strip() == '':
found['_name'] = self.locations.copy()
found['_province'] = pd.DataFrame()
else:
for col in ['_province', '_name']:
found[col] = self.locations[self.locations[col].str.startswith(
py)]
output = pd.concat([found['_name'], found['_province']], axis=0)
self.plot_mapbox(output.copy())
return output
def plot_mapbox(self, df):
print('Reploting')
df['ID'] = df['ID'].map(str)
df['Text'] = df[['Province', 'Name', 'ID']].apply(', '.join, axis=1)
fig = px.scatter_mapbox(
df,
lon='Longitude',
lat='Latitude',
color='Province',
# size=3,
hover_name='Text',
zoom=2,
height=300
)
fig.update_layout(**mapbox)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
self.canvas = fig.to_html()
return self.canvas
# %%
# dw = DataWorker()
# dw.locations
# %%
# dw.search_by_pinyin('bei')
|
[
"pandas.read_html",
"pandas.DataFrame",
"pypinyin.lazy_pinyin",
"plotly.express.scatter_mapbox",
"os.environ.get",
"os.path.join",
"pandas.concat"
] |
[((236, 263), 'os.environ.get', 'os.environ.get', (['"""Sync"""', '"""."""'], {}), "('Sync', '.')\n", (250, 263), False, 'import os\n'), ((456, 483), 'pandas.read_html', 'pd.read_html', (['locations_url'], {}), '(locations_url)\n', (468, 483), True, 'import pandas as pd\n'), ((1711, 1766), 'pandas.concat', 'pd.concat', (["[found['_name'], found['_province']]"], {'axis': '(0)'}), "([found['_name'], found['_province']], axis=0)\n", (1720, 1766), True, 'import pandas as pd\n'), ((2026, 2141), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['df'], {'lon': '"""Longitude"""', 'lat': '"""Latitude"""', 'color': '"""Province"""', 'hover_name': '"""Text"""', 'zoom': '(2)', 'height': '(300)'}), "(df, lon='Longitude', lat='Latitude', color='Province',\n hover_name='Text', zoom=2, height=300)\n", (2043, 2141), True, 'import plotly.express as px\n'), ((797, 811), 'pypinyin.lazy_pinyin', 'lazy_pinyin', (['s'], {}), '(s)\n', (808, 811), False, 'from pypinyin import lazy_pinyin\n'), ((1506, 1520), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1518, 1520), True, 'import pandas as pd\n'), ((309, 362), 'os.path.join', 'os.path.join', (["os.environ['onedrive']", '""".mapbox_token"""'], {}), "(os.environ['onedrive'], '.mapbox_token')\n", (321, 362), False, 'import os\n')]
|
from ismain import is_main
if is_main():
print("Hello from main.")
|
[
"ismain.is_main"
] |
[((31, 40), 'ismain.is_main', 'is_main', ([], {}), '()\n', (38, 40), False, 'from ismain import is_main\n')]
|
import numpy as np
import scipy as sp
import scipy.spatial
import matplotlib as mpl
import matplotlib.path
from ..kernels.high_level.cauchy import Cauchy_Layer_Apply
from ..point_set import PointSet
def find_interior_points(source, target, boundary_acceptable=False):
"""
quick finding of which points in target are outside vs. inside
"""
# first exclude things outside of bounding box
xmin = source.x.min()
xmax = source.x.max()
ymin = source.y.min()
ymax = source.y.max()
in_bounding_box = np.logical_and.reduce([ target.x > xmin, target.x < xmax,
target.y > ymin, target.y < ymax])
out_bounding_box = np.logical_not(in_bounding_box)
small_targ = PointSet(c=target.c[in_bounding_box])
small_targ.compute_tree()
wn = np.zeros(target.N, dtype=complex)
wn[out_bounding_box] = 0.0
# compute winding number via cauchy sums
wn[in_bounding_box] = Cauchy_Layer_Apply(source, small_targ, \
dipstr=np.ones(source.N)).real
wn = np.abs(wn)
bad = np.logical_or(np.isnan(wn), np.isinf(wn))
good = np.logical_not(bad)
big = np.zeros_like(wn)
big[good] = wn[good] > 1e5
bad = np.logical_or(big, bad)
wn[bad] = 1.0
# get region where that sum was not accurate enough
dist = source.tolerance_to_distance(1e-2)
q = target.find_near_points(source, dist).ravel()
# phys array, good except in near boundary region
wn[q] = 0.0
phys = wn > 0.5
# brute force search
poly = mpl.path.Path(source.get_stacked_boundary(T=False))
xq = target.x[q]
yq = target.y[q]
tq = np.column_stack([xq, yq])
interior = poly.contains_points(tq)
phys[q] = interior
phys[bad] = boundary_acceptable
ext = np.logical_not(phys)
return phys, ext
|
[
"numpy.zeros_like",
"numpy.abs",
"numpy.logical_not",
"numpy.zeros",
"numpy.logical_and.reduce",
"numpy.isnan",
"numpy.isinf",
"numpy.ones",
"numpy.logical_or",
"numpy.column_stack"
] |
[((530, 626), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[target.x > xmin, target.x < xmax, target.y > ymin, target.y < ymax]'], {}), '([target.x > xmin, target.x < xmax, target.y > ymin, \n target.y < ymax])\n', (551, 626), True, 'import numpy as np\n'), ((692, 723), 'numpy.logical_not', 'np.logical_not', (['in_bounding_box'], {}), '(in_bounding_box)\n', (706, 723), True, 'import numpy as np\n'), ((818, 851), 'numpy.zeros', 'np.zeros', (['target.N'], {'dtype': 'complex'}), '(target.N, dtype=complex)\n', (826, 851), True, 'import numpy as np\n'), ((1083, 1093), 'numpy.abs', 'np.abs', (['wn'], {}), '(wn)\n', (1089, 1093), True, 'import numpy as np\n'), ((1157, 1176), 'numpy.logical_not', 'np.logical_not', (['bad'], {}), '(bad)\n', (1171, 1176), True, 'import numpy as np\n'), ((1187, 1204), 'numpy.zeros_like', 'np.zeros_like', (['wn'], {}), '(wn)\n', (1200, 1204), True, 'import numpy as np\n'), ((1246, 1269), 'numpy.logical_or', 'np.logical_or', (['big', 'bad'], {}), '(big, bad)\n', (1259, 1269), True, 'import numpy as np\n'), ((1673, 1698), 'numpy.column_stack', 'np.column_stack', (['[xq, yq]'], {}), '([xq, yq])\n', (1688, 1698), True, 'import numpy as np\n'), ((1808, 1828), 'numpy.logical_not', 'np.logical_not', (['phys'], {}), '(phys)\n', (1822, 1828), True, 'import numpy as np\n'), ((1118, 1130), 'numpy.isnan', 'np.isnan', (['wn'], {}), '(wn)\n', (1126, 1130), True, 'import numpy as np\n'), ((1132, 1144), 'numpy.isinf', 'np.isinf', (['wn'], {}), '(wn)\n', (1140, 1144), True, 'import numpy as np\n'), ((1050, 1067), 'numpy.ones', 'np.ones', (['source.N'], {}), '(source.N)\n', (1057, 1067), True, 'import numpy as np\n')]
|
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
from __future__ import print_function, division, absolute_import
import flask
import jinja2
jinjablue = flask.Blueprint('jinja_filters', __name__)
@jinja2.contextfilter
@jinjablue.app_template_filter()
def split(context, value, delim=None):
'''Split a string based on a delimiter'''
if not delim:
delim = ' '
return value.split(delim) if value else None
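# Example template usage once the blueprint is registered on a Flask app
# (results shown are assumed from str.split semantics):
#   {{ "a,b,c" | split(",") }}   ->   ['a', 'b', 'c']
#   {{ "one two" | split }}      ->   ['one', 'two']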
|
[
"flask.Blueprint"
] |
[((198, 240), 'flask.Blueprint', 'flask.Blueprint', (['"""jinja_filters"""', '__name__'], {}), "('jinja_filters', __name__)\n", (213, 240), False, 'import flask\n')]
|
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from utils.get_resnet_layer import get_resnet_depth_from_name
from . import dct_capsnet_e1_graph_mnist
from . import dct_capsnet_h1_attention_mnist
from . import dct_capsnet_h1_graph_mnist
from . import dct_capsnet_h1_gumbel_gate_mnist
from . import dwt_capsnet_e1_graph_mnist
from . import dwt_capsnet_e1_graph_smallnorb
from . import dwt_capsnet_fpn_graph_mnist
from . import dwt_capsnet_fpn_graph_smallnorb
from . import dwt_resnet_capsnet_fpn_graph_cifar
from . import rfft_capsnet_e1_graph_mnist
from . import wst_capsnet_e1_graph_mnist
from .call_backs import get_callbacks
from ..etc_model.call_backs import get_callbacks as etc_callback
from ..layers.model_base import Model
from utils.dataset import Dataset
from utils.tools import marginLoss
class TSSCapsNet(Model):
def __init__(self, data_name, model_name='DCT_Efficient_CapsNet', mode='test', config_path='config.json',
custom_path=None, verbose=True, gpu_number=None, optimizer='Adam', half_filter_in_resnet=True,
use_tiny_block=True, heterogeneous=False, **kwargs):
Model.__init__(self, data_name, mode, config_path, verbose)
self.model_name = model_name
if custom_path != None:
self.model_path = custom_path
else:
self.model_path = os.path.join(self.config['saved_model_dir'],
f"{self.model_name}",
f"{self.model_name}_{self.data_name}.h5")
os.makedirs(os.path.join(self.config['saved_model_dir'], f"{self.model_name}"), exist_ok=True)
self.model_path_new_train = os.path.join(self.config['saved_model_dir'],
f"{self.model_name}",
f"{self.model_name}_{self.data_name}_{'{epoch:03d}'}.h5")
self.tb_path = os.path.join(self.config['tb_log_save_dir'], f"{self.model_name}_{self.data_name}")
self.half = half_filter_in_resnet
self.tiny = use_tiny_block
self.heterogeneous = heterogeneous
self.load_graph()
if gpu_number:
self.model = multi_gpu_model(self.model, gpu_number)
self.optimizer = optimizer
def load_graph(self):
if self.data_name in ['MNIST', 'MNIST_SHIFT', 'FASHION_MNIST', 'FASHION_MNIST_SHIFT']:
input_shape = self.config['MNIST_INPUT_SHAPE']
num_classes = 10
elif self.data_name in ['CIFAR10', 'CIFAR10_SHIFT']:
input_shape = self.config['CIFAR10_INPUT_SHAPE']
num_classes = 10
        elif self.data_name == 'SMALLNORB':
num_classes = 5
input_shape = self.config['CIFAR10_INPUT_SHAPE']
elif self.data_name == 'MULTIMNIST':
            raise NotImplementedError
else:
raise NotImplementedError
if self.model_name == "DCT_E_MNIST":
self.model = dct_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "DCT_H_A_MNIST":
self.model = dct_capsnet_h1_attention_mnist.build_graph(input_shape, self.mode, 3, self.model_name)
elif self.model_name == "DCT_H_MNIST":
self.model = dct_capsnet_h1_graph_mnist.build_graph(input_shape, self.mode, 3, self.model_name)
elif self.model_name == "DCT_H_Gumbel_MNIST":
self.model = dct_capsnet_h1_gumbel_gate_mnist.build_graph(input_shape, self.mode, 3, self.model_name)
elif self.model_name == "DWT_E_MNIST":
self.model = dwt_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "DWT_E_SMALLNORB":
self.model = dwt_capsnet_e1_graph_smallnorb.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "DWT_FPN_MNIST":
self.model = dwt_capsnet_fpn_graph_mnist.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Tiny_FPN_MNIST":
self.model = dwt_capsnet_fpn_graph_mnist.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Attention_FPN_MNIST":
self.model = dwt_capsnet_fpn_graph_mnist.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_FPN_SMALLNORB":
self.model = dwt_capsnet_fpn_graph_smallnorb.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Tiny_FPN_SMALLNORB":
self.model = dwt_capsnet_fpn_graph_smallnorb.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "DWT_Attention_FPN_SMALLNORB":
self.model = dwt_capsnet_fpn_graph_smallnorb.build_graph(input_shape, self.mode, num_classes,
['FPN', 'FPN', 'FPN'], regularize=1e-4,
name=self.model_name)
elif self.model_name == "RFFT_E_MNIST":
self.model = rfft_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name == "WST_E_MNIST":
self.model = wst_capsnet_e1_graph_mnist.build_graph(input_shape, self.mode, self.model_name)
elif self.model_name.startswith("DWT_") and self.model_name.endswith("_FPN_CIFAR"):
# example: "DWT_Tiny_Half_R18_Tiny_FPN_CIFAR"
half = True if "Half_R" in self.model_name else False
tiny = True if "DWT_Tiny" in self.model_name else False
if "Tiny_FPN_CIFAR" in self.model_name:
routing_name_list = ["Tiny_FPN", "Tiny_FPN", "Tiny_FPN"]
elif "Attention_FPN_CIFAR" in self.model_name:
routing_name_list = ['Attention', 'Attention', 'Attention']
elif "FPN_CIFAR" in self.model_name:
routing_name_list = ['FPN', 'FPN', 'FPN']
else:
print("FPN type is not support!")
raise NotImplementedError
self.model = dwt_resnet_capsnet_fpn_graph_cifar.build_graph(
input_shape, self.mode, num_classes=10, routing_name_list=routing_name_list, regularize=1e-4,
depth=get_resnet_depth_from_name(self.model_name), tiny=tiny, half=half, name=self.model_name,
heterogeneous=self.heterogeneous
)
else:
print(f"model name {self.model_name} is NotImplemented")
            raise NotImplementedError
def train(self, dataset=None, initial_epoch=0):
callbacks = get_callbacks(self.model_name,
self.tb_path,
self.model_path_new_train,
self.config['lr_dec'],
self.config['lr'],
optimizer=self.optimizer)
if dataset is None:
dataset = Dataset(self.data_name, self.config_path)
dataset_train, dataset_val = dataset.get_tf_data()
if self.optimizer == 'Adam':
            self.model.compile(optimizer=tf.keras.optimizers.Adam(lr=self.config['lr']),
loss=[marginLoss, 'mse'],
loss_weights=[1., self.config['lmd_gen']],
metrics={self.model_name: 'accuracy'})
else:
self.model.compile(optimizer=tf.keras.optimizers.SGD(lr=self.config['lr']),
loss=[marginLoss, 'mse'],
loss_weights=[1., self.config['lmd_gen']],
metrics={self.model_name: 'accuracy'})
# self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['lr']),
# loss=[marginLoss, 'mse'],
# loss_weights=[1., self.config['lmd_gen']],
# metrics={self.model_name: 'accuracy'})
steps = None
print('-' * 30 + f'{self.data_name} train' + '-' * 30)
history = self.model.fit(dataset_train,
epochs=self.config[f'epochs'], steps_per_epoch=steps,
validation_data=dataset_val, batch_size=self.config['batch_size'],
initial_epoch=initial_epoch,
callbacks=callbacks,
workers=self.config['num_workers'])
self.model.save_weights(os.path.join(self.config['saved_model_dir'],
f"{self.model_name}",
f"{self.model_name}_{self.data_name}.h5"))
return history
|
[
"utils.get_resnet_layer.get_resnet_depth_from_name",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.optimizers.Adam",
"tensorflow.python.keras.utils.multi_gpu_utils.multi_gpu_model",
"utils.dataset.Dataset",
"os.path.join"
] |
[((2412, 2540), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""{self.model_name}"""', 'f"""{self.model_name}_{self.data_name}_{\'{epoch:03d}\'}.h5"""'], {}), '(self.config[\'saved_model_dir\'], f\'{self.model_name}\',\n f"{self.model_name}_{self.data_name}_{\'{epoch:03d}\'}.h5")\n', (2424, 2540), False, 'import os\n'), ((2658, 2745), 'os.path.join', 'os.path.join', (["self.config['tb_log_save_dir']", 'f"""{self.model_name}_{self.data_name}"""'], {}), "(self.config['tb_log_save_dir'],\n f'{self.model_name}_{self.data_name}')\n", (2670, 2745), False, 'import os\n'), ((2076, 2188), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""{self.model_name}"""', 'f"""{self.model_name}_{self.data_name}.h5"""'], {}), "(self.config['saved_model_dir'], f'{self.model_name}',\n f'{self.model_name}_{self.data_name}.h5')\n", (2088, 2188), False, 'import os\n'), ((2292, 2358), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""{self.model_name}"""'], {}), "(self.config['saved_model_dir'], f'{self.model_name}')\n", (2304, 2358), False, 'import os\n'), ((2936, 2975), 'tensorflow.python.keras.utils.multi_gpu_utils.multi_gpu_model', 'multi_gpu_model', (['self.model', 'gpu_number'], {}), '(self.model, gpu_number)\n', (2951, 2975), False, 'from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model\n'), ((8705, 8746), 'utils.dataset.Dataset', 'Dataset', (['self.data_name', 'self.config_path'], {}), '(self.data_name, self.config_path)\n', (8712, 8746), False, 'from utils.dataset import Dataset\n'), ((10284, 10396), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""{self.model_name}"""', 'f"""{self.model_name}_{self.data_name}.h5"""'], {}), "(self.config['saved_model_dir'], f'{self.model_name}',\n f'{self.model_name}_{self.data_name}.h5')\n", (10296, 10396), False, 'import os\n'), ((8885, 8945), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': "self.config['lr']", 'momentum': '(0.9)'}), "(lr=self.config['lr'], momentum=0.9)\n", (8909, 8945), True, 'import tensorflow as tf\n'), ((9203, 9248), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': "self.config['lr']"}), "(lr=self.config['lr'])\n", (9226, 9248), True, 'import tensorflow as tf\n'), ((8003, 8046), 'utils.get_resnet_layer.get_resnet_depth_from_name', 'get_resnet_depth_from_name', (['self.model_name'], {}), '(self.model_name)\n', (8029, 8046), False, 'from utils.get_resnet_layer import get_resnet_depth_from_name\n')]
|
import re
from logging import Logger
from typing import Dict, Union
import yaml
OptionValue = Union[int, float, bool, str]
class BasePreprocessor():
'''Base preprocessor. All preprocessors must inherit from this one.'''
# pylint: disable=too-many-instance-attributes
defaults = {}
tags = ()
@staticmethod
def get_options(options_string: str) -> Dict[str, OptionValue]:
'''Get a dictionary of typed options from a string with XML attributes.
:param options_string: String of XML attributes
:returns: Dictionary with options
'''
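        # e.g. get_options('tag="1" flag="true"') -> {'tag': 1, 'flag': True} (values are parsed with yaml.load)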
if not options_string:
return {}
option_pattern = re.compile(
r'(?P<key>[A-Za-z_:][0-9A-Za-z_:\-\.]*)=(\'|")(?P<value>.+?)\2',
flags=re.DOTALL
)
return {
option.group('key'): yaml.load(option.group('value'), yaml.Loader)
for option in option_pattern.finditer(options_string)
}
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False, options={}):
# pylint: disable=dangerous-default-value
# pylint: disable=too-many-arguments
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.options = {**self.defaults, **options}
self.working_dir = self.project_path / self.config['tmp_dir']
if self.tags:
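            # matches <tag options>body</tag> pseudo-XML blocks for any tag registered by this preprocessor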
self.pattern = re.compile(
rf'(?<!\<)\<(?P<tag>{"|".join(self.tags)})' +
r'(\s(?P<options>[^\<\>]*))?\>' +
r'(?P<body>.*?)\<\/(?P=tag)\>',
flags=re.DOTALL
)
def apply(self):
'''Run the preprocessor against the project directory. Must be implemented
by every preprocessor.
'''
raise NotImplementedError
|
[
"re.compile"
] |
[((662, 762), 're.compile', 're.compile', (['"""(?P<key>[A-Za-z_:][0-9A-Za-z_:\\\\-\\\\.]*)=(\\\\\'|")(?P<value>.+?)\\\\2"""'], {'flags': 're.DOTALL'}), '(\'(?P<key>[A-Za-z_:][0-9A-Za-z_:\\\\-\\\\.]*)=(\\\\\\\'|")(?P<value>.+?)\\\\2\',\n flags=re.DOTALL)\n', (672, 762), False, 'import re\n')]
|
from .db import db
from sqlalchemy.sql import func
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, nullable=False, primary_key=True)
commentBody = db.Column(db.String(255), nullable=False)
createdAt = db.Column(db.DateTime(timezone=True), nullable=False, server_default=func.now())
projectId = db.Column(db.Integer, db.ForeignKey("projects.id"), nullable = False)
userId = db.Column(db.Integer, db.ForeignKey("users.id"), nullable = False)
project = db.relationship("Project", back_populates="comments")
user = db.relationship("User", back_populates="comments")
def to_dict(self):
return {
"id": self.id,
"commentBody": self.commentBody,
"createdAt": self.createdAt,
"projectId": self.projectId,
"userId": self.userId
}
|
[
"sqlalchemy.sql.func.now"
] |
[((320, 330), 'sqlalchemy.sql.func.now', 'func.now', ([], {}), '()\n', (328, 330), False, 'from sqlalchemy.sql import func\n')]
|
import json
import unittest
from jsonschema import validate, ValidationError
from views import app
import os
import urllib.parse
from urllib.parse import urlencode
def testget(url):
client = app.test_client()
client.testing = True
return json.loads(client.get(url).get_data(as_text=True))
class ResponseTest(unittest.TestCase):
def __init__(self):
self.client = app.test_client()
self.client.testing = True
def json_response(self, url):
return json.loads(self.client.get(url).get_data(as_text=True))
def assert_schema(obj, schema, test_name):
try:
validate(obj, schema)
except ValidationError as e:
raise AssertionError(u'error in {}: {}'.format(test_name, str(e)))
def dev_request_url(path, params=None):
params = params.copy() if params else {}
params.update({
'jwt': os.environ["UNSUB_USER1_JWT"]
})
return urllib.parse.urlunparse([
'',
'',
path,
'',
urlencode(params),
'',
])
|
[
"jsonschema.validate",
"views.app.test_client",
"urllib.parse.urlencode"
] |
[((199, 216), 'views.app.test_client', 'app.test_client', ([], {}), '()\n', (214, 216), False, 'from views import app\n'), ((391, 408), 'views.app.test_client', 'app.test_client', ([], {}), '()\n', (406, 408), False, 'from views import app\n'), ((612, 633), 'jsonschema.validate', 'validate', (['obj', 'schema'], {}), '(obj, schema)\n', (620, 633), False, 'from jsonschema import validate, ValidationError\n'), ((997, 1014), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (1006, 1014), False, 'from urllib.parse import urlencode\n')]
|
import subprocess
FILE = "/opt/protostar/bin/format1"
#9 bytes
ADDRESS = "BBBB\x38\x96\x04\x08B"
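# "\x38\x96\x04\x08" is the little-endian address 0x08049638 (presumably the
# target variable of format1); the "%130$n" appended below writes the number of
# characters printed so far to the pointer found at stack argument 130, which
# the surrounding padding bytes align with this embedded address.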
def craft_payload(string):
string += ADDRESS
string += "%130$n"
return string
def main():
    payload = craft_payload("")
subprocess.call([FILE, payload])
if __name__ == "__main__":
main()
|
[
"subprocess.call"
] |
[((276, 308), 'subprocess.call', 'subprocess.call', (['[FILE, payload]'], {}), '([FILE, payload])\n', (291, 308), False, 'import subprocess\n')]
|
# -*- coding: iso-8859-1 -*-
import random
from time import sleep
jogar ='sim'
print("_________________________")
print("!!Bem-vindo ao Jokenpo!!")
print("_________________________")
resultado = ['Vitória do CPU!', 'Houve um empate!', 'O usuário venceu!']
jogadas = ['pedra', 'papel', 'tesoura']
pontos_user = 0
pontos_cpu = 0
while jogar == 'sim':
user = input(f'Escolha uma jogada entre {jogadas}: ').lower()
    while user not in jogadas:
        user = input(f'As jogadas válidas são {jogadas}. Escolha uma entre elas: ').lower()
print('Pronto?')
cpu = random.choice(jogadas)
sleep(3)
print('Processando jogadas!')
sleep(2)
# 0 1 2
#jogadas = ['pedra', 'papel', 'tesoura']
#resultado = ['Vitória do CPU!', 'Houve um empate!', 'O usuário venceu!']
if user in jogadas[0] and cpu in jogadas[1] or user in jogadas[1] and cpu in jogadas[2] or user in jogadas[2] and cpu in jogadas[0]:
result =0
elif user == cpu:
result = 1
else:
result =2
if result == 1:
caso= resultado[1]
elif result == 0:
caso= resultado[0]
pontos_cpu +=1
else:
caso = resultado[2]
pontos_user += 1
print(f'Usuário escolheu {user} e CPU {cpu}.{caso} Placar: Usuário {pontos_user} x CPU {pontos_cpu}.')
jogar = input('Gostaria de jogar novamente? Digite sim ou não:').lower()
while jogar != "sim" and jogar != "não":
jogar = input('Opção inválida! Gostaria de jogar novamente? Digite sim ou não: \n').lower()
if pontos_cpu > pontos_user:
print(f'Placar final: Usuário {pontos_user}x CPU {pontos_cpu}. Vitória do CPU. Melhor sorte na próxima vez!')
elif pontos_user > pontos_cpu:
print(f'Placar final: Usuário {pontos_user}x CPU {pontos_cpu}. Parabéns!!!! Você venceu!! Até a próxima')
else:
print(f'Placar final: Usuário {pontos_user}x CPU {pontos_cpu}. Desta vez empatamos! Até a próxima!')
|
[
"random.choice",
"time.sleep"
] |
[((588, 610), 'random.choice', 'random.choice', (['jogadas'], {}), '(jogadas)\n', (601, 610), False, 'import random\n'), ((616, 624), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (621, 624), False, 'from time import sleep\n'), ((665, 673), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (670, 673), False, 'from time import sleep\n')]
|
import os
# in a real project, this script probably wouldn't make so many assumptions
# about the inputs and outputs
NG_DIST_DIR_PATH = '/ng-notebook/angular/dist/'
OUTFILE_PATH = '/hostmount/output/index.html'
# ng build's index.html will be a small, single-line file
with open(os.path.join(NG_DIST_DIR_PATH, 'index.html'), 'r') as infile:
html = infile.read()
# replace the favicon with inline data
inline_data = "window['breedCounts'] = {" + \
"appenzeller: 6, briard: 9, cotondetulear: 5, dhole: 1, eskimo: 4};"
html = html.replace(\
'<link rel="icon" type="image/x-icon" href="favicon.ico">',
'<script type="text/javascript">' + inline_data + '</script>')
# insert the css
with open(os.path.join(NG_DIST_DIR_PATH, 'styles.bundle.css'), 'r') as infile:
css = infile.read()
html = html.replace(\
'<link href="styles.bundle.css" rel="stylesheet"/>',
'<style>' + css + '</style>')
# insert the js bundles (there are three)
js_files = ['inline.bundle.js', 'polyfills.bundle.js', 'main.bundle.js']
for js_file in js_files:
with open(os.path.join(NG_DIST_DIR_PATH, js_file), 'r') as infile:
js = infile.read()
html = html.replace(\
'<script type="text/javascript" src="' + js_file + '"></script>',
'<script type="text/javascript">' + js + '</script>')
# write the final html
with open(OUTFILE_PATH, 'w') as outfile:
outfile.write(html)
|
[
"os.path.join"
] |
[((281, 325), 'os.path.join', 'os.path.join', (['NG_DIST_DIR_PATH', '"""index.html"""'], {}), "(NG_DIST_DIR_PATH, 'index.html')\n", (293, 325), False, 'import os\n'), ((708, 759), 'os.path.join', 'os.path.join', (['NG_DIST_DIR_PATH', '"""styles.bundle.css"""'], {}), "(NG_DIST_DIR_PATH, 'styles.bundle.css')\n", (720, 759), False, 'import os\n'), ((1081, 1120), 'os.path.join', 'os.path.join', (['NG_DIST_DIR_PATH', 'js_file'], {}), '(NG_DIST_DIR_PATH, js_file)\n', (1093, 1120), False, 'import os\n')]
|
import string
from typing import List
from envinorma.models import ArreteMinisteriel, StructuredText
from text_diff import TextDifferences, text_differences
from unidecode import unidecode
_SIMPLE_CHARS = set(string.ascii_letters + string.digits + string.whitespace)
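# Normalization applied before diffing: transliterate accents with unidecode and keep only ASCII letters, digits and whitespace.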
def _clean_line(line: str) -> str:
res = str(unidecode(line)).strip()
return ''.join(c for c in res if c in _SIMPLE_CHARS)
def extract_am_lines(am: ArreteMinisteriel, normalize_text: bool) -> List[str]:
lines = [line for section in am.sections for line in section.text_lines(1)]
if normalize_text:
return [_clean_line(line) for line in lines]
return lines
def compute_am_diff(am_before: ArreteMinisteriel, am_after: ArreteMinisteriel, normalize_text: bool) -> TextDifferences:
lines_before = extract_am_lines(am_before, normalize_text)
lines_after = extract_am_lines(am_after, normalize_text)
return text_differences(lines_before, lines_after)
def compute_text_diff(text_before: StructuredText, text_after: StructuredText) -> TextDifferences:
lines_before = text_before.text_lines()
lines_after = text_after.text_lines()
return text_differences(lines_before, lines_after)
|
[
"unidecode.unidecode",
"text_diff.text_differences"
] |
[((915, 958), 'text_diff.text_differences', 'text_differences', (['lines_before', 'lines_after'], {}), '(lines_before, lines_after)\n', (931, 958), False, 'from text_diff import TextDifferences, text_differences\n'), ((1157, 1200), 'text_diff.text_differences', 'text_differences', (['lines_before', 'lines_after'], {}), '(lines_before, lines_after)\n', (1173, 1200), False, 'from text_diff import TextDifferences, text_differences\n'), ((320, 335), 'unidecode.unidecode', 'unidecode', (['line'], {}), '(line)\n', (329, 335), False, 'from unidecode import unidecode\n')]
|
from clawpack import pyclaw
from clawpack.pyclaw.solution import Solution
class Solution(Solution):
""" Parallel Solution class.
"""
__doc__ += pyclaw.util.add_parent_doc(pyclaw.Solution)
def get_read_func(self, file_format):
from clawpack.petclaw import io
if file_format == 'petsc':
return io.petsc.read
elif file_format == 'hdf5':
return io.hdf5.read
else:
raise ValueError("File format %s not supported." % file_format)
def get_write_func(self, file_format):
from clawpack.petclaw import io
if 'petsc' in file_format:
return io.petsc.write
elif 'hdf5' in file_format:
return io.hdf5.write
else:
raise ValueError("File format %s not supported." % file_format)
|
[
"clawpack.pyclaw.util.add_parent_doc"
] |
[((157, 200), 'clawpack.pyclaw.util.add_parent_doc', 'pyclaw.util.add_parent_doc', (['pyclaw.Solution'], {}), '(pyclaw.Solution)\n', (183, 200), False, 'from clawpack import pyclaw\n')]
|
import re
from dataclasses import dataclass
from datetime import date, datetime
from typing import Iterator
import requests
from bs4 import BeautifulSoup
@dataclass
class Counters:
tests: int
positive: int
recoveries: int
deaths: int
vaccines: int
@dataclass
class CaseImage:
url: str
date: date
class SACoronavirusClient:
def __init__(self):
self.session = requests.Session()
def get_homepage(self) -> str:
with self.session as session:
response = session.get(
"https://sacoronavirus.co.za/",
headers={"User-Agent": "contactndoh-whatsapp"},
timeout=30,
)
response.raise_for_status()
return response.text
def get_daily_cases_page(self) -> str:
with self.session as session:
response = session.get(
"https://sacoronavirus.co.za/category/daily-cases",
headers={"User-Agent": "contactndoh-whatsapp"},
timeout=30,
)
response.raise_for_status()
return response.text
def get_homepage_counters(self) -> Counters:
soup = BeautifulSoup(self.get_homepage(), "html.parser")
counters = soup.find("div", class_="counters-box")
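        # each counter box pairs a text label with a numeric span; match keywords in the label to pick the right field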
for counter in counters.find_all("div", "counter-box-container"):
name = counter.find("div", "counter-box-content").string
if "test" in name.lower():
tests = int(counter.span["data-value"])
elif "case" in name.lower():
positive = int(counter.span["data-value"])
elif "recover" in name.lower():
recoveries = int(counter.span["data-value"])
elif "death" in name.lower():
deaths = int(counter.span["data-value"])
elif "vaccine" in name.lower():
vaccines = int(counter.span["data-value"])
return Counters(
tests=tests,
positive=positive,
recoveries=recoveries,
deaths=deaths,
vaccines=vaccines,
)
def get_daily_cases_image_urls(self) -> Iterator[CaseImage]:
soup = BeautifulSoup(self.get_daily_cases_page(), "html.parser")
for article in soup.main.find_all("article"):
url = article.img["src"]
d = article.select("h2.entry-title")[0].string
d = re.search(".*\((.*)\).*", d).group(1)
d = datetime.strptime(d, "%A %d %B %Y").date()
yield CaseImage(url=url, date=d)
|
[
"datetime.datetime.strptime",
"requests.Session",
"re.search"
] |
[((405, 423), 'requests.Session', 'requests.Session', ([], {}), '()\n', (421, 423), False, 'import requests\n'), ((2434, 2464), 're.search', 're.search', (['""".*\\\\((.*)\\\\).*"""', 'd'], {}), "('.*\\\\((.*)\\\\).*', d)\n", (2443, 2464), False, 'import re\n'), ((2488, 2523), 'datetime.datetime.strptime', 'datetime.strptime', (['d', '"""%A %d %B %Y"""'], {}), "(d, '%A %d %B %Y')\n", (2505, 2523), False, 'from datetime import date, datetime\n')]
|
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< #
# Module for building transfer learning framework #
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
import tensorflow as tf
import tensorflow.keras.layers as kl
import tensorflow.keras.regularizers as kreg
import tensorflow.keras.initializers as kini
def build_NN_model(config, model_map):
    """Build a transfer-learning Keras model.

    A frozen pre-trained CNN backbone is combined with tabular metadata inputs,
    followed by optional conv/dense heads and a sigmoid output scaled to 0-100.
    """
# load config variables
model_type = config["model_type"]
img_dim = config["img_dim"]
params = config["params"]
# load pre-trained model
pre_net = model_map[model_type]["model"](
weights='imagenet',
input_shape=(img_dim, img_dim, 3),
include_top=False
)
pre_net.trainable = False
# init input list for neural network
all_inputs = []
# image input
img_in = kl.Input(shape=(img_dim, img_dim, 3))
all_inputs.append(img_in)
# pre-processing
if model_map[model_type]["preproc"] is not None:
img_x = model_map[model_type]["preproc"](img_in)
img_x = pre_net(img_x, training=False)
else:
img_x = pre_net(img_in, training=False)
# tabular metadata inputs
x_in = kl.Input(shape=len(config["tab_feats"]))
all_inputs.append(x_in)
# image data processing
if config["conv_proc"]:
d = model_map[model_type]["final_shape"]
all_x = kl.Reshape((d[0], d[1]*d[2], 1))(img_x)
all_x = kl.Conv2D(
filters=params["conv"]["nf"],
kernel_size=d[:2], strides=d[:2], name="post_conv2D",
kernel_regularizer=kreg.l2(params["conv"]["l2"]),
kernel_initializer=kini.RandomUniform(
minval=1/((d[0]+1)*(d[1]+1)),
maxval=1/((d[0]-1)*(d[1]-1))
)
)(all_x)
all_x = kl.Flatten()(all_x)
else:
all_x = kl.GlobalAvgPool2D()(img_x)
# add tabular features and then dropout
if config["batch_norm"]:
all_x = kl.BatchNormalization()(all_x)
all_x = kl.Concatenate()([all_x, x_in])
all_x = kl.Dropout(params["drop"])(all_x)
# additional dense layer
if config["extra_dense"]:
all_x = kl.Dense(
params["xtra"]["n"],
activation="linear",
kernel_regularizer=kreg.l1_l2(l1=params["xtra"]["l1"],
l2=params["xtra"]["l2"]),
name="extra_dense"
)(all_x)
if params["xtra"]["acti"] == "relu":
all_x = kl.LeakyReLU(alpha=params["xtra"]["relu_alpha"])(all_x)
elif params["xtra"]["acti"] == "elu":
all_x = kl.ELU()(all_x)
elif params["xtra"]["acti"] == "prelu":
all_x = kl.PReLU()(all_x)
else: # for sigmoid and tanh
if params["xtra"]["acti"] != "linear":
all_x = kl.Activation(params["xtra"]["acti"])(all_x)
else:
pass
# final output layer
all_x = kl.Dense(
1, activation="sigmoid", name="final_layer",
kernel_regularizer=kreg.l2(params["outy"]["l2"])
)(all_x)
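    # rescale the sigmoid output from [0, 1] to a 0-100 score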
out_y = kl.Lambda(lambda x: x * 100)(all_x)
# compile model
model = tf.keras.Model(inputs=all_inputs, outputs=out_y)
return(model)
|
[
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.ELU",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.layers.PReLU",
"tensorflow.keras.Model",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"tensorflow.keras.regularizers.l1_l2",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Flatten"
] |
[((822, 859), 'tensorflow.keras.layers.Input', 'kl.Input', ([], {'shape': '(img_dim, img_dim, 3)'}), '(shape=(img_dim, img_dim, 3))\n', (830, 859), True, 'import tensorflow.keras.layers as kl\n'), ((3180, 3228), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'all_inputs', 'outputs': 'out_y'}), '(inputs=all_inputs, outputs=out_y)\n', (3194, 3228), True, 'import tensorflow as tf\n'), ((2002, 2018), 'tensorflow.keras.layers.Concatenate', 'kl.Concatenate', ([], {}), '()\n', (2016, 2018), True, 'import tensorflow.keras.layers as kl\n'), ((2046, 2072), 'tensorflow.keras.layers.Dropout', 'kl.Dropout', (["params['drop']"], {}), "(params['drop'])\n", (2056, 2072), True, 'import tensorflow.keras.layers as kl\n'), ((3111, 3139), 'tensorflow.keras.layers.Lambda', 'kl.Lambda', (['(lambda x: x * 100)'], {}), '(lambda x: x * 100)\n', (3120, 3139), True, 'import tensorflow.keras.layers as kl\n'), ((1364, 1398), 'tensorflow.keras.layers.Reshape', 'kl.Reshape', (['(d[0], d[1] * d[2], 1)'], {}), '((d[0], d[1] * d[2], 1))\n', (1374, 1398), True, 'import tensorflow.keras.layers as kl\n'), ((1791, 1803), 'tensorflow.keras.layers.Flatten', 'kl.Flatten', ([], {}), '()\n', (1801, 1803), True, 'import tensorflow.keras.layers as kl\n'), ((1837, 1857), 'tensorflow.keras.layers.GlobalAvgPool2D', 'kl.GlobalAvgPool2D', ([], {}), '()\n', (1855, 1857), True, 'import tensorflow.keras.layers as kl\n'), ((1955, 1978), 'tensorflow.keras.layers.BatchNormalization', 'kl.BatchNormalization', ([], {}), '()\n', (1976, 1978), True, 'import tensorflow.keras.layers as kl\n'), ((2501, 2549), 'tensorflow.keras.layers.LeakyReLU', 'kl.LeakyReLU', ([], {'alpha': "params['xtra']['relu_alpha']"}), "(alpha=params['xtra']['relu_alpha'])\n", (2513, 2549), True, 'import tensorflow.keras.layers as kl\n'), ((3056, 3085), 'tensorflow.keras.regularizers.l2', 'kreg.l2', (["params['outy']['l2']"], {}), "(params['outy']['l2'])\n", (3063, 3085), True, 'import tensorflow.keras.regularizers as kreg\n'), ((1571, 1600), 'tensorflow.keras.regularizers.l2', 'kreg.l2', (["params['conv']['l2']"], {}), "(params['conv']['l2'])\n", (1578, 1600), True, 'import tensorflow.keras.regularizers as kreg\n'), ((1633, 1731), 'tensorflow.keras.initializers.RandomUniform', 'kini.RandomUniform', ([], {'minval': '(1 / ((d[0] + 1) * (d[1] + 1)))', 'maxval': '(1 / ((d[0] - 1) * (d[1] - 1)))'}), '(minval=1 / ((d[0] + 1) * (d[1] + 1)), maxval=1 / ((d[0] -\n 1) * (d[1] - 1)))\n', (1651, 1731), True, 'import tensorflow.keras.initializers as kini\n'), ((2274, 2334), 'tensorflow.keras.regularizers.l1_l2', 'kreg.l1_l2', ([], {'l1': "params['xtra']['l1']", 'l2': "params['xtra']['l2']"}), "(l1=params['xtra']['l1'], l2=params['xtra']['l2'])\n", (2284, 2334), True, 'import tensorflow.keras.regularizers as kreg\n'), ((2623, 2631), 'tensorflow.keras.layers.ELU', 'kl.ELU', ([], {}), '()\n', (2629, 2631), True, 'import tensorflow.keras.layers as kl\n'), ((2707, 2717), 'tensorflow.keras.layers.PReLU', 'kl.PReLU', ([], {}), '()\n', (2715, 2717), True, 'import tensorflow.keras.layers as kl\n'), ((2841, 2878), 'tensorflow.keras.layers.Activation', 'kl.Activation', (["params['xtra']['acti']"], {}), "(params['xtra']['acti'])\n", (2854, 2878), True, 'import tensorflow.keras.layers as kl\n')]
|
import os.path as osp
import os
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
import moviepy.editor as mpy
import tqdm
from contextlib import contextmanager
from mpi4py import MPI
import imageio
from baselines import logger
import baselines.common.tf_util as U
from baselines.common import colorize
from baselines.common.mpi_adam import MpiAdam
import dataset
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
class GlobalTrainer(object):
def __init__(self, name, env, runner, policy, config):
self._name = name
self._env = env.unwrapped
self._runner = runner
self._config = config
self._policy = policy
self._is_chef = (MPI.COMM_WORLD.Get_rank() == 0)
# global step
self.global_step = tf.Variable(0, name='global_step', dtype=tf.int64, trainable=False)
self._update_global_step = tf.assign(self.global_step, self.global_step + 1)
# tensorboard summary
self.summary_name = ['global/length', 'global/reward', 'global/success']
# build loss/optimizers
self._build_distillation()
def _build_distillation(self):
config = self._config
pi = self._policy
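        # helper reporting the global norm of all policy variables (logged as global/global_norm)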
self._global_norm = U.function(
[], tf.global_norm([tf.cast(var, tf.float32) for var in pi.get_variables()]))
# policy update
ac = pi.pdtype.sample_placeholder([None])
pol_var_list = [v for v in pi.get_trainable_variables() if 'pol' in v.name]
self._pol_adam = MpiAdam(pol_var_list)
pol_loss = tf.reduce_mean(pi.pd.neglogp(ac))
#pol_loss = tf.reduce_mean(tf.square(pi.pd.sample() - ac))
fetch_dict = {
'loss': pol_loss,
'g': U.flatgrad(pol_loss, pol_var_list,
clip_norm=config.global_max_grad_norm)
}
self._pol_loss = U.function([ac] + pi.ob, fetch_dict)
self.summary_name += ['global/loss', 'global/grad_norm', 'global/global_norm']
# value update
if config.global_vf:
ret = tf.placeholder(dtype=tf.float32, shape=[None], name='return')
vf_var_list = [v for v in pi.get_trainable_variables() if 'vf' in v.name]
self._vf_adam = MpiAdam(vf_var_list)
vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
fetch_dict = {
'vf_loss': vf_loss,
'vf_g': U.flatgrad(vf_loss, vf_var_list,
clip_norm=config.global_max_grad_norm)
}
self._vf_loss = U.function([ret] + pi.ob, fetch_dict)
self.summary_name += ['global/vf_loss', 'global/vf_grad_norm']
# initialize and sync
U.initialize()
self._pol_adam.sync()
if config.global_vf:
self._vf_adam.sync()
if config.debug:
            logger.log("[worker: {} global] Init param sum".format(MPI.COMM_WORLD.Get_rank()), self._pol_adam.getflat().sum())
@contextmanager
def timed(self, msg):
if self._is_chef:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
def update(self, step, ob, ac, ret=None):
info = defaultdict(list)
config = self._config
sess = U.get_session()
global_step = sess.run(self.global_step)
sess.run(self._update_global_step)
pi = self._policy
ob_dict = self._env.get_ob_dict(ob)
if self._config.obs_norm == 'learn':
for ob_name in pi.ob_type:
pi.ob_rms[ob_name].update(ob_dict[ob_name])
with self.timed("update global network"):
for _ in range(self._config.global_iters):
# policy network
for (mb_ob, mb_ac) in dataset.iterbatches(
(ob, ac), include_final_partial_batch=False,
batch_size=self._config.global_batch_size):
ob_list = pi.get_ob_list(mb_ob)
fetched = self._pol_loss(mb_ac, *ob_list)
loss, g = fetched['loss'], fetched['g']
self._pol_adam.update(g, self._config.global_stepsize)
info['global/loss'].append(np.mean(loss))
info['global/grad_norm'].append(np.linalg.norm(g))
if config.global_vf:
# value network
for (mb_ob, mb_ret) in dataset.iterbatches(
(ob, ret), include_final_partial_batch=False,
batch_size=self._config.global_batch_size):
ob_list = pi.get_ob_list(mb_ob)
fetched = self._vf_loss(mb_ret, *ob_list)
vf_loss, vf_g = fetched['vf_loss'], fetched['vf_g']
self._vf_adam.update(vf_g, self._config.global_stepsize)
info['global/vf_loss'].append(np.mean(vf_loss))
info['global/vf_grad_norm'].append(np.linalg.norm(vf_g))
for key, value in info.items():
info[key] = np.mean(value)
info['global/global_norm'] = self._global_norm()
return info
def summary(self, it):
info = self.evaluate(it, record=self._config.training_video_record)
# save checkpoint
if it % self._config.ckpt_save_step == 0:
fname = osp.join(self._config.log_dir, '%.5d' % it)
U.save_state(fname)
return info
def evaluate(self, ckpt_num=None, record=False):
config = self._config
ep_lens = []
ep_rets = []
ep_success = []
if record:
record_dir = osp.join(config.log_dir, 'video')
os.makedirs(record_dir, exist_ok=True)
for _ in tqdm.trange(10):
ep_traj = self._runner.rollout(True, True)
ep_lens.append(ep_traj["ep_length"][0])
ep_rets.append(ep_traj["ep_reward"][0])
ep_success.append(ep_traj["ep_success"][0])
logger.log('[{}] Trial #{}: lengths {}, returns {}'.format(
self._name, _, ep_traj["ep_length"][0], ep_traj["ep_reward"][0]))
# Video recording
if record:
visual_obs = ep_traj["visual_ob"]
video_name = '{}{}_{}{}.{}'.format(config.video_prefix or '', self._name,
'' if ckpt_num is None else 'ckpt_{}_'.format(ckpt_num), _, config.video_format)
video_path = osp.join(record_dir, video_name)
if config.video_format == 'mp4':
fps = 60.
def f(t):
frame_length = len(visual_obs)
new_fps = 1./(1./fps + 1./frame_length)
idx = min(int(t*new_fps), frame_length-1)
return visual_obs[idx]
video = mpy.VideoClip(f, duration=len(visual_obs)/fps+2)
video.write_videofile(video_path, fps, verbose=False)
elif config.video_format == 'gif':
imageio.mimsave(video_path, visual_obs, fps=100)
logger.log('[{}] Episode Length: {}'.format(self._name, np.mean(ep_lens)))
logger.log('[{}] Episode Rewards: {}'.format(self._name, np.mean(ep_rets)))
return {'global/length': np.mean(ep_lens),
'global/reward': np.mean(ep_rets),
'global/success': np.mean(ep_success)}
|
[
"baselines.common.mpi_adam.MpiAdam",
"baselines.common.tf_util.get_session",
"baselines.common.tf_util.initialize",
"collections.defaultdict",
"tensorflow.assign",
"tensorflow.Variable",
"numpy.mean",
"numpy.linalg.norm",
"os.path.join",
"imageio.mimsave",
"baselines.common.tf_util.function",
"dataset.iterbatches",
"tensorflow.placeholder",
"tensorflow.cast",
"baselines.common.tf_util.flatgrad",
"baselines.common.tf_util.save_state",
"tqdm.trange",
"mpi4py.MPI.COMM_WORLD.Get_rank",
"baselines.common.colorize",
"os.makedirs",
"time.time",
"tensorflow.square"
] |
[((847, 914), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'dtype': 'tf.int64', 'trainable': '(False)'}), "(0, name='global_step', dtype=tf.int64, trainable=False)\n", (858, 914), True, 'import tensorflow as tf\n'), ((950, 999), 'tensorflow.assign', 'tf.assign', (['self.global_step', '(self.global_step + 1)'], {}), '(self.global_step, self.global_step + 1)\n', (959, 999), True, 'import tensorflow as tf\n'), ((1587, 1608), 'baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['pol_var_list'], {}), '(pol_var_list)\n', (1594, 1608), False, 'from baselines.common.mpi_adam import MpiAdam\n'), ((1936, 1972), 'baselines.common.tf_util.function', 'U.function', (['([ac] + pi.ob)', 'fetch_dict'], {}), '([ac] + pi.ob, fetch_dict)\n', (1946, 1972), True, 'import baselines.common.tf_util as U\n'), ((2780, 2794), 'baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (2792, 2794), True, 'import baselines.common.tf_util as U\n'), ((3395, 3412), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3406, 3412), False, 'from collections import defaultdict\n'), ((3458, 3473), 'baselines.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (3471, 3473), True, 'import baselines.common.tf_util as U\n'), ((5968, 5983), 'tqdm.trange', 'tqdm.trange', (['(10)'], {}), '(10)\n', (5979, 5983), False, 'import tqdm\n'), ((765, 790), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (788, 790), False, 'from mpi4py import MPI\n'), ((1799, 1872), 'baselines.common.tf_util.flatgrad', 'U.flatgrad', (['pol_loss', 'pol_var_list'], {'clip_norm': 'config.global_max_grad_norm'}), '(pol_loss, pol_var_list, clip_norm=config.global_max_grad_norm)\n', (1809, 1872), True, 'import baselines.common.tf_util as U\n'), ((2131, 2192), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""return"""'}), "(dtype=tf.float32, shape=[None], name='return')\n", (2145, 2192), True, 'import tensorflow as tf\n'), ((2307, 2327), 'baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['vf_var_list'], {}), '(vf_var_list)\n', (2314, 2327), False, 'from baselines.common.mpi_adam import MpiAdam\n'), ((2628, 2665), 'baselines.common.tf_util.function', 'U.function', (['([ret] + pi.ob)', 'fetch_dict'], {}), '([ret] + pi.ob, fetch_dict)\n', (2638, 2665), True, 'import baselines.common.tf_util as U\n'), ((3179, 3190), 'time.time', 'time.time', ([], {}), '()\n', (3188, 3190), False, 'import time\n'), ((5279, 5293), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (5286, 5293), True, 'import numpy as np\n'), ((5572, 5615), 'os.path.join', 'osp.join', (['self._config.log_dir', "('%.5d' % it)"], {}), "(self._config.log_dir, '%.5d' % it)\n", (5580, 5615), True, 'import os.path as osp\n'), ((5628, 5647), 'baselines.common.tf_util.save_state', 'U.save_state', (['fname'], {}), '(fname)\n', (5640, 5647), True, 'import baselines.common.tf_util as U\n'), ((5865, 5898), 'os.path.join', 'osp.join', (['config.log_dir', '"""video"""'], {}), "(config.log_dir, 'video')\n", (5873, 5898), True, 'import os.path as osp\n'), ((5911, 5949), 'os.makedirs', 'os.makedirs', (['record_dir'], {'exist_ok': '(True)'}), '(record_dir, exist_ok=True)\n', (5922, 5949), False, 'import os\n'), ((7526, 7542), 'numpy.mean', 'np.mean', (['ep_lens'], {}), '(ep_lens)\n', (7533, 7542), True, 'import numpy as np\n'), ((7577, 7593), 'numpy.mean', 'np.mean', (['ep_rets'], {}), '(ep_rets)\n', (7584, 7593), True, 'import numpy as np\n'), ((7629, 7648), 
'numpy.mean', 'np.mean', (['ep_success'], {}), '(ep_success)\n', (7636, 7648), True, 'import numpy as np\n'), ((2365, 2390), 'tensorflow.square', 'tf.square', (['(pi.vpred - ret)'], {}), '(pi.vpred - ret)\n', (2374, 2390), True, 'import tensorflow as tf\n'), ((2479, 2550), 'baselines.common.tf_util.flatgrad', 'U.flatgrad', (['vf_loss', 'vf_var_list'], {'clip_norm': 'config.global_max_grad_norm'}), '(vf_loss, vf_var_list, clip_norm=config.global_max_grad_norm)\n', (2489, 2550), True, 'import baselines.common.tf_util as U\n'), ((3126, 3156), 'baselines.common.colorize', 'colorize', (['msg'], {'color': '"""magenta"""'}), "(msg, color='magenta')\n", (3134, 3156), False, 'from baselines.common import colorize\n'), ((3959, 4071), 'dataset.iterbatches', 'dataset.iterbatches', (['(ob, ac)'], {'include_final_partial_batch': '(False)', 'batch_size': 'self._config.global_batch_size'}), '((ob, ac), include_final_partial_batch=False, batch_size\n =self._config.global_batch_size)\n', (3978, 4071), False, 'import dataset\n'), ((6678, 6710), 'os.path.join', 'osp.join', (['record_dir', 'video_name'], {}), '(record_dir, video_name)\n', (6686, 6710), True, 'import os.path as osp\n'), ((7390, 7406), 'numpy.mean', 'np.mean', (['ep_lens'], {}), '(ep_lens)\n', (7397, 7406), True, 'import numpy as np\n'), ((7474, 7490), 'numpy.mean', 'np.mean', (['ep_rets'], {}), '(ep_rets)\n', (7481, 7490), True, 'import numpy as np\n'), ((1345, 1369), 'tensorflow.cast', 'tf.cast', (['var', 'tf.float32'], {}), '(var, tf.float32)\n', (1352, 1369), True, 'import tensorflow as tf\n'), ((2979, 3004), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (3002, 3004), False, 'from mpi4py import MPI\n'), ((4616, 4728), 'dataset.iterbatches', 'dataset.iterbatches', (['(ob, ret)'], {'include_final_partial_batch': '(False)', 'batch_size': 'self._config.global_batch_size'}), '((ob, ret), include_final_partial_batch=False,\n batch_size=self._config.global_batch_size)\n', (4635, 4728), False, 'import dataset\n'), ((4413, 4426), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (4420, 4426), True, 'import numpy as np\n'), ((4480, 4497), 'numpy.linalg.norm', 'np.linalg.norm', (['g'], {}), '(g)\n', (4494, 4497), True, 'import numpy as np\n'), ((7276, 7324), 'imageio.mimsave', 'imageio.mimsave', (['video_path', 'visual_obs'], {'fps': '(100)'}), '(video_path, visual_obs, fps=100)\n', (7291, 7324), False, 'import imageio\n'), ((3260, 3271), 'time.time', 'time.time', ([], {}), '()\n', (3269, 3271), False, 'import time\n'), ((5116, 5132), 'numpy.mean', 'np.mean', (['vf_loss'], {}), '(vf_loss)\n', (5123, 5132), True, 'import numpy as np\n'), ((5193, 5213), 'numpy.linalg.norm', 'np.linalg.norm', (['vf_g'], {}), '(vf_g)\n', (5207, 5213), True, 'import numpy as np\n')]
|
# GUI Tkinter grid file.
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from PIL import Image, ImageTk # pip install pillow (<- in terminal if not already installed)
import tkinter as tk
import csv
# OTHER PYTHON FILES (OURS)
import menuFunctions
import openData
# import moreFunctions
import pyodbc
# pip install wheel, then pip install pandas
import pandas as pd
# Root needs to be created FIRST
root = tk.Tk()
searchText = "ERROR"
def filterOptions():
global searchText
# Label Frame
filterOptionsFrame = tk.LabelFrame(root, text="Sort & Search", pady=5, padx=5)
filterOptionsFrame.pack(side="top", padx=10, pady=10, fill="both", expand="no")
# filterOptionsFrame.configure(bg="white")
# Filter label and drop down menu
# label
filterLabel = tk.Label(filterOptionsFrame, text="Sort:")
filterLabel.pack(side='left')
# Option/Drop menu
filters = [
'Department',
'GPA',
'Graduation Year',
'First Name Start',
'Last Name Start'
]
currentFilter = tk.StringVar()
currentFilter.set(filters[0])
filterMenu = tk.OptionMenu(filterOptionsFrame, currentFilter, *filters)
filterMenu.pack(side='left', padx=5)
filterMenu.config(bg="white", fg="black", width=17) # filterMenu settings
# Reset Filter button
button_resetFilter = tk.Button(filterOptionsFrame, text="Reset Sort", bg="light sky blue")
button_resetFilter.pack(side='left')
# Search Text Box
searchBox = Entry(filterOptionsFrame, borderwidth=2)
# Search entry box deletion
def deleteSearch():
searchBox.delete(0, END)
# Clear Search Button
button_clearSearch = tk.Button(filterOptionsFrame, text="CLEAR", bg="light sky blue", command=deleteSearch)
button_clearSearch.pack(side='right', padx=2)
# Search Button
    def doSearch():
        # read the query at click time (not at widget-creation time) before opening the results window
        global searchText
        searchText = searchBox.get()
        openResults()
    button_search = tk.Button(filterOptionsFrame, text="SEARCH", bg="khaki1", command=doSearch)
button_search.pack(side='right', padx=2)
# Search text box pack
searchBox.pack(side='right', padx=5)
# Search label
searchLabel = tk.Label(filterOptionsFrame, text="Search:")
searchLabel.pack(side='right')
#######################################################################################################
############### DATA TABLE & RELATED FUNCTIONS #######################################
#######################################################################################################
# Label Frame
dataTableFrame = tk.LabelFrame(root, text="Student Data", pady=2, padx=5, width=1300, height=1000)
dataScrollbarV = tk.Scrollbar(dataTableFrame, orient=VERTICAL)
dataScrollbarH = tk.Scrollbar(dataTableFrame, orient=HORIZONTAL)
dataListBox = Listbox(dataTableFrame, width=20, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxID = Listbox(dataTableFrame, width=3, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxEmail = Listbox(dataTableFrame, width=25, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxDepartment = Listbox(dataTableFrame, width=8, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxMajor = Listbox(dataTableFrame, width=15, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
dataListBoxDate = Listbox(dataTableFrame, width=8, yscrollcommand=dataScrollbarV.set, selectmode=BROWSE, exportselection=0)
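# Keep all six column listboxes scrolling together so their rows stay aligned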
def yview(*args):
dataListBox.yview(*args)
dataListBoxID.yview(*args)
dataListBoxEmail.yview(*args)
dataListBoxDepartment.yview(*args)
dataListBoxMajor.yview(*args)
dataListBoxDate.yview(*args)
dataScrollbarV.config(command=yview)
#dataScrollbarV.config(command=lambda:[dataListBox.yview(), dataListBoxID.yview(), dataListBoxEmail.yview(), dataListBoxDepartment.yview(), dataListBoxMajor.yview(), dataListBoxDate.yview()])
#dataScrollbarH.config(command=dataListBox.xview)
#################################################################
## VARIABLES ##
filePathCurrent = ""
studentList = []
# Display listbox onto GUI
def dataTablePack():
dataTableFrame.pack(anchor="n", padx=10, pady=1, fill="both", expand="yes")
dataListBox.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxID.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxEmail.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxDepartment.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxMajor.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataListBoxDate.pack(side=LEFT, pady=10, fill="both", expand="yes")
dataScrollbarV.pack(side=RIGHT, fill='y')
# Insert data from opened csv
def insertData():
global filePathCurrent
global studentList
deleteAll()
# Just so name is easier to use
filePath = openData.getFilePath()
filePathCurrent = filePath
# Opens chosen file
File = open(filePath)
Reader = csv.reader(File)
Data = list(Reader)
# Removes first line of file - Row filled with the Column titles
del(Data[0])
for x in list(range(0, len(Data))):
studentList.append(Data[x])
#dataListBox.insert(END, Data[x])
name = studentList[x][1] + ", " + studentList[x][0]
#formattedText = ('{:<20}{:>15}{:>50}'.format(name, studentList[x][2], studentList[x][4]))
#formattedText = (name + " " + studentList[x][2] + " " + studentList[x][3] + " " + studentList[x][4] + " " + studentList[x][5] + " " + studentList[x][6])
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentList[x][2]))
dataListBoxEmail.insert(END, (studentList[x][3]))
dataListBoxDepartment.insert(END, (studentList[x][4]))
dataListBoxMajor.insert(END, (studentList[x][5]))
dataListBoxDate.insert(END, (studentList[x][6]))
# For refreshing current open file
def insertDataRefresh():
global filePathCurrent
global studentList
deleteAll()
# Opens chosen file
File = open(filePathCurrent)
Reader = csv.reader(File)
Data = list(Reader)
del(Data[0])
for x in list(range(0, len(Data))):
studentList.append(Data[x])
name = studentList[x][1] + ", " + studentList[x][0]
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentList[x][2]))
dataListBoxEmail.insert(END, (studentList[x][3]))
dataListBoxDepartment.insert(END, (studentList[x][4]))
dataListBoxMajor.insert(END, (studentList[x][5]))
dataListBoxDate.insert(END, (studentList[x][6]))
## CREATES DATATABLE AFTER PICKING FILE
def dataTable():
dataTablePack()
insertData()
# Deletes ONE student
def deleteOne():
global studentList
index = dataListBox.curselection()[0]
del studentList[index]
dataListBox.delete(index) # ANCHOR
dataListBoxID.delete(index)
dataListBoxEmail.delete(index)
dataListBoxDepartment.delete(index)
dataListBoxMajor.delete(index)
dataListBoxDate.delete(index)
# Clears Table
def deleteAll():
dataListBox.delete(0, END)
dataListBoxID.delete(0, END)
dataListBoxEmail.delete(0, END)
dataListBoxDepartment.delete(0, END)
dataListBoxMajor.delete(0, END)
dataListBoxDate.delete(0, END)
def select():
dataListBox.config(text=dataListBox.get(ANCHOR))
def saveFile():
global studentList
csvWrite = filedialog.asksaveasfile(mode='w', defaultextension=".csv", filetypes=(("CSV Files", "*.csv"), ("All Files", "*.*")))
if csvWrite is None: # When 'canceled' and no file saved
return
# with open(csvWrite, "wb") as f:
# writer = csv.writer(f)
# writer.writerows(a)
text2save = str(dataListBox.get(0, END)) # starts from `1.0`, not `0.0`
csvWrite.write(text2save)
csvWrite.close()
def refreshTable():
deleteAll()
insertDataRefresh()
def updateStudent():
global studentList
'''
First_Name
Last_Name
Student_ID
Email
Department
Major
Grad_Date
'''
# Gets location of current selection
index = dataListBox.curselection()[0]
newWindow = Toplevel(root)
newWindow.title("Update Student")
newWindow.geometry("315x230")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=False, height=False) # Window size changeability
#Create Text Boxes
First_Name = Entry(newWindow, width = 30)
First_Name.grid(row = 0, column = 1, padx = 20, pady = (10, 0))
First_Name.insert(0, studentList[index][0])
Last_Name = Entry(newWindow, width = 30)
Last_Name.grid(row = 1, column = 1, padx = 20)
Last_Name.insert(0, studentList[index][1])
Student_ID = Entry(newWindow, width = 30)
Student_ID.grid(row = 2, column = 1, padx = 20)
Student_ID.insert(0, studentList[index][2])
Email = Entry(newWindow, width = 30)
Email.grid(row = 3, column = 1, padx = 20)
Email.insert(0, studentList[index][3])
Department = Entry(newWindow, width = 30)
Department.grid(row = 4, column = 1, padx = 20)
Department.insert(0, studentList[index][4])
Major = Entry(newWindow, width = 30)
Major.grid(row = 5, column = 1, padx = 20)
Major.insert(0, studentList[index][5])
Grad_Date = Entry(newWindow, width = 30)
Grad_Date.grid(row = 6, column = 1, padx = 20)
Grad_Date.insert(0, studentList[index][6])
#Create Text Box Labels
First_Name_Label = Label(newWindow, text = 'First Name')
First_Name_Label.grid(row = 0, column = 0, pady = (10, 0))
Last_Name_Label = Label(newWindow, text = 'Last Name')
Last_Name_Label.grid(row = 1, column = 0)
Student_ID_Label = Label(newWindow, text = 'Student ID')
Student_ID_Label.grid(row = 2, column = 0)
Email_Label = Label(newWindow, text = 'Email')
Email_Label.grid(row = 3, column = 0)
Department_Label = Label(newWindow, text = 'Department')
Department_Label.grid(row = 4, column = 0)
Major_Label = Label(newWindow, text = 'Major')
Major_Label.grid(row = 5, column = 0)
Grad_Date_Label = Label(newWindow, text = 'Grad Date')
Grad_Date_Label.grid(row = 6, column = 0)
goodUpdate_Label = Label(newWindow, text="* * *")
goodUpdate_Label.grid(row = 8, columnspan=2)
def retrieve_input(entryBox):
input = entryBox.get()
return input
def goodUpdate():
## Update student
# Get entered text
studentList[index][0] = retrieve_input(First_Name)
studentList[index][1] = retrieve_input(Last_Name)
studentList[index][2] = retrieve_input(Student_ID)
studentList[index][3] = retrieve_input(Email)
studentList[index][4] = retrieve_input(Department)
studentList[index][5] = retrieve_input(Major)
studentList[index][6] = retrieve_input(Grad_Date)
name = studentList[index][1] + ", " + studentList[index][0]
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentList[index][2]))
dataListBoxEmail.insert(END, (studentList[index][3]))
dataListBoxDepartment.insert(END, (studentList[index][4]))
dataListBoxMajor.insert(END, (studentList[index][5]))
dataListBoxDate.insert(END, (studentList[index][6]))
#name = studentList[index][1] + ", " + studentList[index][0]
#dataListBox.insert(END, Data[x])
#formattedText = str(name + " " + studentList[index][2] + " " + studentList[index][3] + " " + studentList[index][4] + " " + studentList[index][5] + " " + studentList[index][6])
#dataListBox.insert(index, (formattedText))
insertDataRefresh()
        goodUpdate_Label.config(text="Successful Update!")
#Create Update Button
Update_button = Button(newWindow, text = 'Update Student', bg="goldenrod1", command=goodUpdate)
Update_button.grid(row = 7, column = 0, columnspan = 2, pady = 10, padx = 10, ipadx = 100)
# print(index)
return None
def openResults():
global studentList
global searchText
newWindow = Toplevel(root)
newWindow.title("Search Results")
newWindow.geometry("315x170")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=False, height=False) # Window size changeability
#Create Text Boxes
First_Name = Entry(newWindow, width = 30)
First_Name.grid(row = 0, column = 1, padx = 20, pady = (10, 0))
#First_Name.insert(0, studentList[x][0])
Last_Name = Entry(newWindow, width = 30)
Last_Name.grid(row = 1, column = 1, padx = 20)
#Last_Name.insert(0, studentList[x][1])
Student_ID = Entry(newWindow, width = 30)
Student_ID.grid(row = 2, column = 1, padx = 20)
#Student_ID.insert(0, studentList[x][2])
Email = Entry(newWindow, width = 30)
Email.grid(row = 3, column = 1, padx = 20)
#Email.insert(0, studentList[x][3])
Department = Entry(newWindow, width = 30)
Department.grid(row = 4, column = 1, padx = 20)
#Department.insert(0, studentList[x][4])
Major = Entry(newWindow, width = 30)
Major.grid(row = 5, column = 1, padx = 20)
#Major.insert(0, studentList[x][5])
Grad_Date = Entry(newWindow, width = 30)
Grad_Date.grid(row = 6, column = 1, padx = 20)
#Grad_Date.insert(0, studentList[x][6])
#Create Text Box Labels
First_Name_Label = Label(newWindow, text = 'First Name')
First_Name_Label.grid(row = 0, column = 0, pady = (10, 0))
Last_Name_Label = Label(newWindow, text = 'Last Name')
Last_Name_Label.grid(row = 1, column = 0)
Student_ID_Label = Label(newWindow, text = 'Student ID')
Student_ID_Label.grid(row = 2, column = 0)
Email_Label = Label(newWindow, text = 'Email')
Email_Label.grid(row = 3, column = 0)
Department_Label = Label(newWindow, text = 'Department')
Department_Label.grid(row = 4, column = 0)
Major_Label = Label(newWindow, text = 'Major')
Major_Label.grid(row = 5, column = 0)
Grad_Date_Label = Label(newWindow, text = 'Grad Date')
Grad_Date_Label.grid(row = 6, column = 0)
#index = dataListBox.get(0, END).index(searchText)
#print(index)
#if (index == "ERROR"):
# filterOptions().searchBox.insert(0, "STRING NOT FOUND")
# Gets location of current selection
#index = dataListBox.curselection()[0]
def addStudent():
global studentList
'''
First_Name
Last_Name
Student_ID
Email
Department
Major
Grad_Date
'''
newWindow = Toplevel(root)
newWindow.title("Add Student")
newWindow.geometry("365x230")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=False, height=False) # Window size changeability
#Create Text Boxes
First_Name = Entry(newWindow, width = 30)
First_Name.grid(row = 0, column = 1, padx = 20, pady = (10, 0))
First_Name.insert(0, "FIRSTNAME")
Last_Name = Entry(newWindow, width = 30)
Last_Name.grid(row = 1, column = 1, padx = 20)
Last_Name.insert(0, "LASTNAME")
Student_ID = Entry(newWindow, width = 30)
Student_ID.grid(row = 2, column = 1, padx = 20)
Student_ID.insert(0, "#####")
Email = Entry(newWindow, width = 30)
Email.grid(row = 3, column = 1, padx = 20)
Email.insert(0, "<EMAIL>")
Department = Entry(newWindow, width = 30)
Department.grid(row = 4, column = 1, padx = 20)
Department.insert(0, "Business")
Major = Entry(newWindow, width = 30)
Major.grid(row = 5, column = 1, padx = 20)
Major.insert(0, "Finance")
Grad_Date = Entry(newWindow, width = 30)
Grad_Date.grid(row = 6, column = 1, padx = 20)
Grad_Date.insert(0, "##/##/20##")
#Create Text Box Labels
First_Name_Label = Label(newWindow, text = 'First Name')
First_Name_Label.grid(row = 0, column = 0, pady = (10, 0))
Last_Name_Label = Label(newWindow, text = 'Last Name')
Last_Name_Label.grid(row = 1, column = 0)
Student_ID_Label = Label(newWindow, text = 'Student ID')
Student_ID_Label.grid(row = 2, column = 0)
Email_Label = Label(newWindow, text = 'Email')
Email_Label.grid(row = 3, column = 0)
Department_Label = Label(newWindow, text = 'Department')
Department_Label.grid(row = 4, column = 0)
Major_Label = Label(newWindow, text = 'Major')
Major_Label.grid(row = 5, column = 0)
Grad_Date_Label = Label(newWindow, text = 'Grad Date')
Grad_Date_Label.grid(row = 6, column = 0)
goodAdd_Label = Label(newWindow, text="* * *")
goodAdd_Label.grid(row = 8, columnspan=2)
def retrieve_input(entryBox):
input = entryBox.get()
return input
# Button disables after a successfull addition
def goodAdd():
global filePathCurrent
global studentList
## Add student
# Get entered text
firstName = retrieve_input(First_Name)
lastName = retrieve_input(Last_Name)
studentid = retrieve_input(Student_ID)
email_ = retrieve_input(Email)
department_ = retrieve_input(Department)
major_ = retrieve_input(Major)
gradDate = retrieve_input(Grad_Date)
# Store into the table
gatheredText = [firstName, lastName, studentid, email_, department_, major_, gradDate]
studentList.append(gatheredText)
name = lastName + ", " + firstName
dataListBox.insert(END, (name))
dataListBoxID.insert(END, (studentid))
dataListBoxEmail.insert(END, (email_))
dataListBoxDepartment.insert(END, (department_))
dataListBoxMajor.insert(END, (major_))
dataListBoxDate.insert(END, (gradDate))
# formattedText = (lastName + ", " + firstName + " " + studentid + " " + email_ + " " + department_ + " " + major_ + " " + gradDate)
# dataListBox.insert(END, (formattedText))
# Confirmation & disbale button
        goodAdd_Label.config(text="Successful Add!")
Add_button.config(state=DISABLED)
# Create Add Button
Add_button = Button(newWindow, text = 'Add Student to Database', bg="SeaGreen1", command=goodAdd)
Add_button.grid(row = 7, column = 0, columnspan = 2, pady = 10, padx = 10, ipadx = 100)
# print(index)
return None
"""
# for r in range (rows):
# for c in range (cols):
canvas = tk.Canvas(dataTableFrame, bg="white", width=700, height=500)
canvas.pack(fill="both", expand="yes")
canvas2 = tk.Canvas(canvas, width=700, height=500)
canvas2.pack(side="left")
labelData = tk.Label(canvas2, text=(df.to_string()), bg="white")
labelData.grid(row=rows, column=cols)
scrollbar = tk.Scrollbar(canvas, command=canvas.yview)
scrollbar.pack(side="right", fill="y")
canvas.configure(yscrollcommand=scrollbar.set)
"""
#######################################################################################################
#######################################################################################################
def bottomButtons():
# Label Frame
bottomButtonsFrame = tk.LabelFrame(
root, text="Database Options", pady=5, padx=5)
bottomButtonsFrame.pack(side="bottom", padx=10, pady=10, fill="x", expand="no")
# Buttons
button_refresh = tk.Button(bottomButtonsFrame, text="Refresh Table", bg="light sky blue", command=refreshTable)
button_refresh.pack(side='left', padx=5)
button_save = tk.Button(bottomButtonsFrame, text="Save Current Database", bg="pale green", command=saveFile)
button_save.pack(side='left', padx=5)
#button_emailStudent = tk.Button(bottomButtonsFrame, text="Email Student(s)", bg="CadetBlue1")
#button_emailStudent.pack(side='left', padx=5)
button_add = tk.Button(bottomButtonsFrame, text="Add Student", bg="SeaGreen1", command=addStudent)
button_add.pack(side='right', padx=5)
button_update = tk.Button(bottomButtonsFrame, text="Update Student", bg="goldenrod1", command=updateStudent) # DarkSeaGreen1
button_update.pack(side='right', padx=5)
button_delete = tk.Button(bottomButtonsFrame, text="Delete Student", bg="IndianRed1", command=deleteOne)
button_delete.pack(side='right', padx=5)
button_clearTable = tk.Button(bottomButtonsFrame, text="CLEAR Table", bg="yellow2", command=deleteAll)
button_clearTable.pack(side='right', padx=5)
def userGuide():
newWindow = Toplevel(root)
newWindow.title("About Studabase")
newWindow.geometry("500x500")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=True, height=True) # Window size changeability
about_Label = Label(newWindow, text = "STUDABASE (stoo-da-base) is a GUI style student database organizational software that allows its users to:" + '\n' + "Take data from a MySQL database and translate it to a GUI system." + '\n' + "Sort data by fields such as student ID, first and last name, email, department, etc." + '\n' + "Add and remove students as well as search for specific ones." + '\n' + "Restrict displayed data through various filters.")
about_Label.grid(row = 0, column = 0, pady = (10, 0))
def aboutStudabase():
newWindow = Toplevel(root)
newWindow.title("About Studabase")
newWindow.geometry("500x800")
newWindow.iconbitmap('hat.ico')
newWindow.resizable(width=True, height=True) # Window size changeability
about_Label = Label(newWindow, text = "SRDG - STUDABASE: The Student Database (Stoo-da-base)" + '\n' + "<NAME> <EMAIL>" + '\n' + "<NAME> <EMAIL>" + '\n' + "<NAME> <EMAIL>" + '\n' + "<NAME> <EMAIL>")
about_Label.grid(row = 0, column = 0, pady = (10, 0))
def mainWindow():
# Root Options
root.title("STUDABASE: The Student Database ")
# Icon - .ico file should be in the same directory as this file.
root.iconbitmap('hat.ico')
# Window Size: root.geometry('500x600')
root.geometry('800x800')
# Stops windows size from being changeable
root.resizable(width=True, height=True)
# root.configure(bg = 'gray24')
# MENU BAR
menubar = tk.Menu(root)
# File - Menu Bar
fileMenu = tk.Menu(menubar, tearoff=0)
#fileMenu.add_command(label="New Database", command=menuFunctions.placeHolderFunc)
fileMenu.add_command(label="Open Database", command=insertData)
fileMenu.add_command(label="Save As...(Current Database)", command=saveFile)
fileMenu.add_separator()
#fileMenu.add_command(label="Properties...", command=menuFunctions.placeHolderFunc)
#fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=fileMenu)
# Edit - Menu Bar
editMenu = tk.Menu(menubar, tearoff=0)
editMenu.add_command(label="Refresh Database", command=refreshTable)
editMenu.add_separator()
#editMenu.add_command(label="Select All", command=menuFunctions.placeHolderFunc)
editMenu.add_separator()
editMenu.add_command(label="Add Student", command=addStudent)
editMenu.add_command(label="Delete Student(s)", command=deleteOne)
editMenu.add_separator()
menubar.add_cascade(label="Edit", menu=editMenu)
# View - Menu Bar
#viewMenu = tk.Menu(menubar, tearoff=0)
#viewMenu.add_command(label="Choice 1", command=menuFunctions.placeHolderFunc)
#viewMenu.add_command(label="Choice 2", command=menuFunctions.placeHolderFunc)
#viewMenu.add_separator()
#viewMenu.add_command(label="Choice 3", command=menuFunctions.placeHolderFunc)
#viewMenu.add_command(label="Choice 4", command=menuFunctions.placeHolderFunc)
#menubar.add_cascade(label="View", menu=viewMenu)
# Settings - Menu Bar
#settingsMenu = tk.Menu(menubar, tearoff=0)
# Change id & pass for current database
#settingsMenu.add_command(label="Database Settings", command=menuFunctions.placeHolderFunc)
# Change email platform/tool
#settingsMenu.add_command(label="Email Platform", command=menuFunctions.placeHolderFunc)
# Block changes - disables adding and deleting students (basically a read only mode)
#settingsMenu.add_command( label="View Only Mode", command=menuFunctions.placeHolderFunc)
# settingsMenu.add_separator()
#menubar.add_cascade(label="Settings", menu=settingsMenu)
# Help - Menu Bar
helpmenu = tk.Menu(menubar, tearoff=0)
# Display guide on how to use STUDABASE
helpmenu.add_command(label="User Guide", command=userGuide)
# Display info abut STUDABASE - Creators, when made, etc.
helpmenu.add_command(label="About STUDABASE", command=aboutStudabase)
# helpmenu.add_separator()
menubar.add_cascade(label="Help", menu=helpmenu)
filterOptions()
dataTable()
bottomButtons()
# Needed for Menu bar
root.config(menu=menubar)
# GUI program is constantly looping to check for changes - Loop created
root.mainloop() # THIS SHOULD BE THE LAST LINE
|
[
"tkinter.StringVar",
"tkinter.LabelFrame",
"csv.reader",
"tkinter.Menu",
"tkinter.Button",
"openData.getFilePath",
"tkinter.Scrollbar",
"tkinter.OptionMenu",
"tkinter.filedialog.asksaveasfile",
"tkinter.Label",
"tkinter.Tk"
] |
[((439, 446), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (444, 446), True, 'import tkinter as tk\n'), ((2598, 2684), 'tkinter.LabelFrame', 'tk.LabelFrame', (['root'], {'text': '"""Student Data"""', 'pady': '(2)', 'padx': '(5)', 'width': '(1300)', 'height': '(1000)'}), "(root, text='Student Data', pady=2, padx=5, width=1300, height\n =1000)\n", (2611, 2684), True, 'import tkinter as tk\n'), ((2702, 2747), 'tkinter.Scrollbar', 'tk.Scrollbar', (['dataTableFrame'], {'orient': 'VERTICAL'}), '(dataTableFrame, orient=VERTICAL)\n', (2714, 2747), True, 'import tkinter as tk\n'), ((2765, 2812), 'tkinter.Scrollbar', 'tk.Scrollbar', (['dataTableFrame'], {'orient': 'HORIZONTAL'}), '(dataTableFrame, orient=HORIZONTAL)\n', (2777, 2812), True, 'import tkinter as tk\n'), ((561, 618), 'tkinter.LabelFrame', 'tk.LabelFrame', (['root'], {'text': '"""Sort & Search"""', 'pady': '(5)', 'padx': '(5)'}), "(root, text='Sort & Search', pady=5, padx=5)\n", (574, 618), True, 'import tkinter as tk\n'), ((819, 861), 'tkinter.Label', 'tk.Label', (['filterOptionsFrame'], {'text': '"""Sort:"""'}), "(filterOptionsFrame, text='Sort:')\n", (827, 861), True, 'import tkinter as tk\n'), ((1080, 1094), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (1092, 1094), True, 'import tkinter as tk\n'), ((1147, 1205), 'tkinter.OptionMenu', 'tk.OptionMenu', (['filterOptionsFrame', 'currentFilter', '*filters'], {}), '(filterOptionsFrame, currentFilter, *filters)\n', (1160, 1205), True, 'import tkinter as tk\n'), ((1378, 1447), 'tkinter.Button', 'tk.Button', (['filterOptionsFrame'], {'text': '"""Reset Sort"""', 'bg': '"""light sky blue"""'}), "(filterOptionsFrame, text='Reset Sort', bg='light sky blue')\n", (1387, 1447), True, 'import tkinter as tk\n'), ((1726, 1817), 'tkinter.Button', 'tk.Button', (['filterOptionsFrame'], {'text': '"""CLEAR"""', 'bg': '"""light sky blue"""', 'command': 'deleteSearch'}), "(filterOptionsFrame, text='CLEAR', bg='light sky blue', command=\n deleteSearch)\n", (1735, 1817), True, 'import tkinter as tk\n'), ((1903, 1981), 'tkinter.Button', 'tk.Button', (['filterOptionsFrame'], {'text': '"""SEARCH"""', 'bg': '"""khaki1"""', 'command': 'openResults'}), "(filterOptionsFrame, text='SEARCH', bg='khaki1', command=openResults)\n", (1912, 1981), True, 'import tkinter as tk\n'), ((2170, 2214), 'tkinter.Label', 'tk.Label', (['filterOptionsFrame'], {'text': '"""Search:"""'}), "(filterOptionsFrame, text='Search:')\n", (2178, 2214), True, 'import tkinter as tk\n'), ((4970, 4992), 'openData.getFilePath', 'openData.getFilePath', ([], {}), '()\n', (4990, 4992), False, 'import openData\n'), ((5087, 5103), 'csv.reader', 'csv.reader', (['File'], {}), '(File)\n', (5097, 5103), False, 'import csv\n'), ((6192, 6208), 'csv.reader', 'csv.reader', (['File'], {}), '(File)\n', (6202, 6208), False, 'import csv\n'), ((7606, 7728), 'tkinter.filedialog.asksaveasfile', 'filedialog.asksaveasfile', ([], {'mode': '"""w"""', 'defaultextension': '""".csv"""', 'filetypes': "(('CSV Files', '*.csv'), ('All Files', '*.*'))"}), "(mode='w', defaultextension='.csv', filetypes=((\n 'CSV Files', '*.csv'), ('All Files', '*.*')))\n", (7630, 7728), False, 'from tkinter import filedialog\n'), ((19361, 19421), 'tkinter.LabelFrame', 'tk.LabelFrame', (['root'], {'text': '"""Database Options"""', 'pady': '(5)', 'padx': '(5)'}), "(root, text='Database Options', pady=5, padx=5)\n", (19374, 19421), True, 'import tkinter as tk\n'), ((19551, 19649), 'tkinter.Button', 'tk.Button', (['bottomButtonsFrame'], {'text': '"""Refresh Table"""', 'bg': '"""light sky blue"""', 
'command': 'refreshTable'}), "(bottomButtonsFrame, text='Refresh Table', bg='light sky blue',\n command=refreshTable)\n", (19560, 19649), True, 'import tkinter as tk\n'), ((19710, 19808), 'tkinter.Button', 'tk.Button', (['bottomButtonsFrame'], {'text': '"""Save Current Database"""', 'bg': '"""pale green"""', 'command': 'saveFile'}), "(bottomButtonsFrame, text='Save Current Database', bg='pale green',\n command=saveFile)\n", (19719, 19808), True, 'import tkinter as tk\n'), ((20020, 20110), 'tkinter.Button', 'tk.Button', (['bottomButtonsFrame'], {'text': '"""Add Student"""', 'bg': '"""SeaGreen1"""', 'command': 'addStudent'}), "(bottomButtonsFrame, text='Add Student', bg='SeaGreen1', command=\n addStudent)\n", (20029, 20110), True, 'import tkinter as tk\n'), ((20169, 20265), 'tkinter.Button', 'tk.Button', (['bottomButtonsFrame'], {'text': '"""Update Student"""', 'bg': '"""goldenrod1"""', 'command': 'updateStudent'}), "(bottomButtonsFrame, text='Update Student', bg='goldenrod1',\n command=updateStudent)\n", (20178, 20265), True, 'import tkinter as tk\n'), ((20345, 20437), 'tkinter.Button', 'tk.Button', (['bottomButtonsFrame'], {'text': '"""Delete Student"""', 'bg': '"""IndianRed1"""', 'command': 'deleteOne'}), "(bottomButtonsFrame, text='Delete Student', bg='IndianRed1',\n command=deleteOne)\n", (20354, 20437), True, 'import tkinter as tk\n'), ((20504, 20591), 'tkinter.Button', 'tk.Button', (['bottomButtonsFrame'], {'text': '"""CLEAR Table"""', 'bg': '"""yellow2"""', 'command': 'deleteAll'}), "(bottomButtonsFrame, text='CLEAR Table', bg='yellow2', command=\n deleteAll)\n", (20513, 20591), True, 'import tkinter as tk\n'), ((22356, 22369), 'tkinter.Menu', 'tk.Menu', (['root'], {}), '(root)\n', (22363, 22369), True, 'import tkinter as tk\n'), ((22407, 22434), 'tkinter.Menu', 'tk.Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (22414, 22434), True, 'import tkinter as tk\n'), ((22967, 22994), 'tkinter.Menu', 'tk.Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (22974, 22994), True, 'import tkinter as tk\n'), ((24576, 24603), 'tkinter.Menu', 'tk.Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (24583, 24603), True, 'import tkinter as tk\n')]
|
'''
What this script does is download data from https://www.coingecko.com through its API, with the help of
data.JSON, which acts as the list of the cryptos I currently trade; that list is updated
manually. As for prices, it does not download entry and close price margins: it is only data for a general preview
of the charts
'''
from bin import pd,np,time,json,CoinGeckoAPI
from bin import constant as c
path = c.PATHCOINGECKO
# I did not delete this because it does not affect me
save_file = open(path + '/cardano.txt','w')
# here is the list of coins I trade
lista_cryptos = c.PATHJSON
def get_lista_json(_path_):
_lista_ = open(_path_,"r")
_data_ = json.load(_lista_)
api_call_crypto_name, save_name_file = [],[]
for i in _data_['crypto_trade'][0]:
save_name_file.append(i.lower())
api_call_crypto_name.append(_data_['crypto_trade'][0][i])
df = pd.DataFrame(columns=["name_file","crypto_api_name"])
df['name_file'] = save_name_file
df['crypto_api_name'] = api_call_crypto_name
return df
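# Note (added, not from the original repo): get_lista_json expects data.json to look roughly like
# {"crypto_trade": [{"ADA": "cardano", "BTC": "bitcoin"}]}, where each key becomes the lowercased
# output file name and each value is the CoinGecko coin id. The tickers/ids shown here are
# illustrative assumptions only.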
def api_gecko(path_,output_file_,_id_,name_save_file):
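# Downloads one year of daily USD prices for the given CoinGecko id and writes
# FECHA/HORA/PRECIO columns to <name_save_file>.csv under path_.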
cg = CoinGeckoAPI()
sb = cg.ping()
status_gecko = False
status_gecko = True
resultado = cg.get_coin_market_chart_by_id(id=str(_id_),vs_currency='usd',days='365')
fecha = []
tiempo = []
precios = []
for i in range(len(resultado['prices'])):
fecha.append(time.strftime('%Y.%m.%d',time.localtime((resultado['prices'][i][0])/1000)))
tiempo.append(time.strftime('%H:%M:%S',time.localtime((resultado['prices'][i][0])/1000)))
precios.append(resultado['prices'][i][1])
data_f = np.array(fecha)
data_t = np.array(tiempo)
data_p = np.array(precios)
data_ = {'FECHA':data_f,'HORA':data_t,'PRECIO':data_p}
df = pd.DataFrame(data=data_)
df.to_csv(path_ + '/' + str(name_save_file) + '.csv')
return status_gecko
def startDownload():
data = get_lista_json(lista_cryptos)
try:
for index in range(len(data)):
name_file = data['name_file'].iloc[index]
id = data['crypto_api_name'].iloc[index]
print(str(id),str(name_file))
api_gecko(path,save_file,_id_=str(id),name_save_file=str(name_file))
print('<<<<<<< ' + str(id) + ' >>>>>>>>>')
except Exception as error:
print(error)
#status_gecko = api_gecko(path,save_file)
|
[
"bin.time.localtime",
"bin.np.array",
"bin.CoinGeckoAPI",
"bin.pd.DataFrame",
"bin.json.load"
] |
[((688, 706), 'bin.json.load', 'json.load', (['_lista_'], {}), '(_lista_)\n', (697, 706), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((912, 966), 'bin.pd.DataFrame', 'pd.DataFrame', ([], {'columns': "['name_file', 'crypto_api_name']"}), "(columns=['name_file', 'crypto_api_name'])\n", (924, 966), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1131, 1145), 'bin.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (1143, 1145), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1663, 1678), 'bin.np.array', 'np.array', (['fecha'], {}), '(fecha)\n', (1671, 1678), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1692, 1708), 'bin.np.array', 'np.array', (['tiempo'], {}), '(tiempo)\n', (1700, 1708), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1722, 1739), 'bin.np.array', 'np.array', (['precios'], {}), '(precios)\n', (1730, 1739), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1808, 1832), 'bin.pd.DataFrame', 'pd.DataFrame', ([], {'data': 'data_'}), '(data=data_)\n', (1820, 1832), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1450, 1498), 'bin.time.localtime', 'time.localtime', (["(resultado['prices'][i][0] / 1000)"], {}), "(resultado['prices'][i][0] / 1000)\n", (1464, 1498), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n'), ((1548, 1596), 'bin.time.localtime', 'time.localtime', (["(resultado['prices'][i][0] / 1000)"], {}), "(resultado['prices'][i][0] / 1000)\n", (1562, 1596), False, 'from bin import pd, np, time, json, CoinGeckoAPI\n')]
|
from typing import Union
from selenium.webdriver.support.ui import Select as SelSelect
from ..actor import Actor
from ..pacing import beat, MINOR
from ..target import Target
class Select:
"""
Selects an option from a dropdown menu. This is a superclass that will
create the correct specific Select action that will need to be used,
depending on how the option needs to be selected. Some examples of
invocations:
Select.the_option_named("January").from_the(MONTH_DROPDOWN)
Select.the_option_at_index(0).from_the(MONTH_DROPDOWN)
Select.the_option_with_value("jan").from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
@staticmethod
def the_option_named(text: str) -> "SelectByText":
"""
Instantiate a |SelectByText| class which will select the option
with the given text.
Args:
text (str): The text of the option to select.
Returns:
|SelectByText|
"""
return SelectByText(text)
@staticmethod
def the_option_at_index(index: Union[int, str]) -> "SelectByIndex":
"""
Instantiate a |SelectByIndex| class which will select the option
at the specified index. This index is 0-based.
Args:
index (Union[int, str]): The index (0-based) of the option to
select.
Returns:
|SelectByIndex|
"""
return SelectByIndex(index)
@staticmethod
def the_option_with_value(value: str) -> "SelectByValue":
"""
Instantiate a |SelectByValue| class which will select the option
with the given value.
Args:
value (str): The value of the option to select.
Returns:
|SelectByValue|
"""
return SelectByValue(value)
class SelectByText:
"""
A specialized Select action that chooses the option by text. This
class is meant to be accessed via the Select action's static
|Select.the_option_named| method. A typical invocation might look
like:
Select.the_option_named("January").from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
def from_the(self, target: Target) -> "SelectByText":
"""
Provides the |Target| to select the option from.
Args:
target (Target): The |Target| describing the dropdown element
to select from
Returns:
|SelectByText|
"""
self.target = target
return self
def from_(self, target: Target) -> "SelectByText":
"""Syntactic sugar for |SelectByText.from_the|."""
return self.from_the(target)
@beat("{0} selects the option '{text}' from the {target}.", gravitas=MINOR)
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to attempt to find the dropdown element described
by the stored target, then performs the select action.
Args:
the_actor (Actor): The |Actor| who will perform the action.
Raises:
|UnableToPerformException|: if the actor does not have the
ability to |BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
select = SelSelect(element)
select.select_by_visible_text(self.text)
def __init__(self, text: str, target: Target = None) -> None:
self.target = target
self.text = text
class SelectByIndex:
"""
A specialized |Select| action that chooses the option by its index.
This class is meant to be accessed via the Select action's static
|Select.the_option_at_index| method. A typical invocation might look
like:
Select.the_option_at_index(0).from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
def from_the(self, target: Target) -> "SelectByIndex":
"""
Provides the |Target| to select the option from.
Args:
target (Target): The |Target| describing the dropdown element
to select from
Returns:
|SelectByIndex|
"""
self.target = target
return self
def from_(self, target: Target) -> "SelectByIndex":
"""Syntactic sugar for |SelectByIndex.from_the|."""
return self.from_the(target)
@beat("{0} selects the option at index {index} from the {target}.", gravitas=MINOR)
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to attempt to find the dropdown element described
by the stored target, then performs the select action.
Args:
the_actor (Actor): The |Actor| who will perform the
action.
Raises:
|UnableToPerformException|: if the actor does not have the
ability to |BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
select = SelSelect(element)
select.select_by_index(self.index)
def __init__(self, index: Union[int, str], target: Target = None) -> None:
self.target = target
self.index = str(index)
class SelectByValue:
"""
A specialized Select action that chooses the option by its value. This
class is meant to be accessed via the Select action's static
|Select.the_option_with_value| method. A typical invocation might look
like:
Select.the_option_with_value("jan").from_the(MONTH_DROPDOWN)
It can then be passed along to the |Actor| to perform the action.
"""
def from_the(self, target: Target) -> "SelectByValue":
"""
Provides the |Target| to select the option from.
Args:
target (Target): The |Target| describing the dropdown element
to select from
Returns:
|SelectByValue|
"""
self.target = target
return self
def from_(self, target: Target) -> "SelectByValue":
"""Syntactic sugar for |SelectByValue.from_the|."""
return self.from_the(target)
@beat(
"{0} selects the option with value '{value}' from the {target}.", gravitas=MINOR
)
def perform_as(self, the_actor: Actor) -> None:
"""
Asks the actor to attempt to find the dropdown element described
by the stored target, then performs the select action.
Args:
the_actor (Actor): The |Actor| who will perform the action.
Raises:
|UnableToPerformException|: if the actor does not have the
ability to |BrowseTheWeb|.
"""
element = self.target.found_by(the_actor)
select = SelSelect(element)
select.select_by_value(self.value)
def __init__(self, value: Union[int, str], target: Target = None) -> None:
self.target = target
self.value = str(value)
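# Usage sketch (added for illustration; the actor instance and MONTH_DROPDOWN target are assumed
# to be defined elsewhere in a test suite):
#
#   the_actor.attempts_to(Select.the_option_named("January").from_the(MONTH_DROPDOWN))
#
# attempts_to is the usual screenplay-pattern entry point; if this Actor class exposes a different
# method, the built action's perform_as(the_actor) can be called directly.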
|
[
"selenium.webdriver.support.ui.Select"
] |
[((3343, 3361), 'selenium.webdriver.support.ui.Select', 'SelSelect', (['element'], {}), '(element)\n', (3352, 3361), True, 'from selenium.webdriver.support.ui import Select as SelSelect\n'), ((5043, 5061), 'selenium.webdriver.support.ui.Select', 'SelSelect', (['element'], {}), '(element)\n', (5052, 5061), True, 'from selenium.webdriver.support.ui import Select as SelSelect\n'), ((6765, 6783), 'selenium.webdriver.support.ui.Select', 'SelSelect', (['element'], {}), '(element)\n', (6774, 6783), True, 'from selenium.webdriver.support.ui import Select as SelSelect\n')]
|
import pymc as pm
import matplotlib.pyplot as plt
import numpy as np
plt.rc('font', family='Malgun Gothic')
lambda_ = pm.Exponential("poisson_param", 1)
data_generator = pm.Poisson("data_generater", lambda_)
data_plus_one = data_generator + 1
print(lambda_.children)
print(data_generator.parents)
# value
print(lambda_.value)
betas = pm.Uniform("betas", 0, 1, size=5)
betas.value
## random
ld1 = pm.Exponential("lambda_1", 1) # first 행동의 prior
ld2 = pm.Exponential("lambda_2", 1) # second 행동의 prior
tau = pm.DiscreteUniform("tau", lower=0, upper=10) # 행동 변화에 대한 prior
print("init")
print(ld1.value)
print(ld2.value)
print(tau.value)
print(ld1.random(), ld2.random(), tau.random())
print("random call")
print(ld1.value)
print(ld2.value)
print(tau.value)
n_data_points = 5
@pm.deterministic
def lambda_(tau=tau, lambda_1=ld1, lambda_2=ld2):
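# Before the switchpoint tau the rate is lambda_1; from tau onward it is lambda_2.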
out = np.zeros(n_data_points)
out[:tau] = lambda_1
out[tau:] = lambda_2
return out
####################################################
#### Include observations in the model ####
figsize = (12.5, 4)
plt.figure(figsize=figsize)
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 300
samples = [ld1.random() for i in range(20000)]
plt.hist(samples, bins=70, normed=True, histtype="stepfilled")
plt.xlim(0, 8)
plt.show()
# fixed value
data = np.array([10, 25, 15, 20, 35])
obs = pm.Poisson("obs", lambda_, value=data, observed=True)
obs.value
##################
##### Modeling #####
tau = pm.rdiscrete_uniform(0, 80)
alpha = 1./20.
lambda_1, lambda_2 = pm.rexponential(alpha, 2)
lambda_ = np.r_[lambda_1*np.ones(tau), lambda_2*np.ones(80-tau)]
data = pm.rpoisson(lambda_)
plt.bar(np.arange(80), data, color="#348ABD")
plt.bar(tau-1, data[tau-1], color='r', label='행동변화')
plt.xlable("time")
plt.ylabel("message")
plt.xlim(0, 80)
plt.legend()
|
[
"pymc.Poisson",
"matplotlib.pyplot.bar",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"pymc.rexponential",
"pymc.rpoisson",
"matplotlib.pyplot.rc",
"pymc.DiscreteUniform",
"pymc.Exponential",
"matplotlib.pyplot.show",
"pymc.Uniform",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlable",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"pymc.rdiscrete_uniform",
"matplotlib.pyplot.hist",
"numpy.zeros",
"numpy.array"
] |
[((70, 108), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Malgun Gothic"""'}), "('font', family='Malgun Gothic')\n", (76, 108), True, 'import matplotlib.pyplot as plt\n'), ((119, 153), 'pymc.Exponential', 'pm.Exponential', (['"""poisson_param"""', '(1)'], {}), "('poisson_param', 1)\n", (133, 153), True, 'import pymc as pm\n'), ((171, 208), 'pymc.Poisson', 'pm.Poisson', (['"""data_generater"""', 'lambda_'], {}), "('data_generater', lambda_)\n", (181, 208), True, 'import pymc as pm\n'), ((338, 371), 'pymc.Uniform', 'pm.Uniform', (['"""betas"""', '(0)', '(1)'], {'size': '(5)'}), "('betas', 0, 1, size=5)\n", (348, 371), True, 'import pymc as pm\n'), ((401, 430), 'pymc.Exponential', 'pm.Exponential', (['"""lambda_1"""', '(1)'], {}), "('lambda_1', 1)\n", (415, 430), True, 'import pymc as pm\n'), ((455, 484), 'pymc.Exponential', 'pm.Exponential', (['"""lambda_2"""', '(1)'], {}), "('lambda_2', 1)\n", (469, 484), True, 'import pymc as pm\n'), ((510, 554), 'pymc.DiscreteUniform', 'pm.DiscreteUniform', (['"""tau"""'], {'lower': '(0)', 'upper': '(10)'}), "('tau', lower=0, upper=10)\n", (528, 554), True, 'import pymc as pm\n'), ((1041, 1068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1051, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1183, 1245), 'matplotlib.pyplot.hist', 'plt.hist', (['samples'], {'bins': '(70)', 'normed': '(True)', 'histtype': '"""stepfilled"""'}), "(samples, bins=70, normed=True, histtype='stepfilled')\n", (1191, 1245), True, 'import matplotlib.pyplot as plt\n'), ((1246, 1260), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(8)'], {}), '(0, 8)\n', (1254, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1269, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1319), 'numpy.array', 'np.array', (['[10, 25, 15, 20, 35]'], {}), '([10, 25, 15, 20, 35])\n', (1297, 1319), True, 'import numpy as np\n'), ((1326, 1379), 'pymc.Poisson', 'pm.Poisson', (['"""obs"""', 'lambda_'], {'value': 'data', 'observed': '(True)'}), "('obs', lambda_, value=data, observed=True)\n", (1336, 1379), True, 'import pymc as pm\n'), ((1433, 1460), 'pymc.rdiscrete_uniform', 'pm.rdiscrete_uniform', (['(0)', '(80)'], {}), '(0, 80)\n', (1453, 1460), True, 'import pymc as pm\n'), ((1497, 1522), 'pymc.rexponential', 'pm.rexponential', (['alpha', '(2)'], {}), '(alpha, 2)\n', (1512, 1522), True, 'import pymc as pm\n'), ((1595, 1615), 'pymc.rpoisson', 'pm.rpoisson', (['lambda_'], {}), '(lambda_)\n', (1606, 1615), True, 'import pymc as pm\n'), ((1662, 1718), 'matplotlib.pyplot.bar', 'plt.bar', (['(tau - 1)', 'data[tau - 1]'], {'color': '"""r"""', 'label': '"""행동변화"""'}), "(tau - 1, data[tau - 1], color='r', label='행동변화')\n", (1669, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1733), 'matplotlib.pyplot.xlable', 'plt.xlable', (['"""time"""'], {}), "('time')\n", (1725, 1733), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1755), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""message"""'], {}), "('message')\n", (1744, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1771), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(80)'], {}), '(0, 80)\n', (1764, 1771), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1784), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1782, 1784), True, 'import matplotlib.pyplot as plt\n'), ((858, 881), 'numpy.zeros', 'np.zeros', (['n_data_points'], {}), '(n_data_points)\n', (866, 881), True, 
'import numpy as np\n'), ((1624, 1637), 'numpy.arange', 'np.arange', (['(80)'], {}), '(80)\n', (1633, 1637), True, 'import numpy as np\n'), ((1548, 1560), 'numpy.ones', 'np.ones', (['tau'], {}), '(tau)\n', (1555, 1560), True, 'import numpy as np\n'), ((1571, 1588), 'numpy.ones', 'np.ones', (['(80 - tau)'], {}), '(80 - tau)\n', (1578, 1588), True, 'import numpy as np\n')]
|
import re
from django.conf.urls import url, patterns, include
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
from dicom_review.views import review
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', review),
url(r'^login/$', 'django.contrib.auth.views.login', name="login"),
# Administrative components
url(r'^admin/', include(admin.site.urls)),
)
# In production, these two locations must be served up statically
urlpatterns += patterns('django.views.static',
url(r'^%s(?P<path>.*)$' % re.escape(settings.MEDIA_URL.lstrip('/')), 'serve', {
'document_root': settings.MEDIA_ROOT
}),
url(r'^%s(?P<path>.*)$' % re.escape(settings.STATIC_URL.lstrip('/')), 'serve', {
'document_root': settings.STATIC_ROOT
}),
)
|
[
"django.contrib.admin.autodiscover",
"django.conf.urls.include",
"django.conf.settings.STATIC_URL.lstrip",
"django.conf.urls.url",
"django.conf.settings.MEDIA_URL.lstrip"
] |
[((213, 233), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (231, 233), False, 'from django.contrib import admin\n'), ((266, 283), 'django.conf.urls.url', 'url', (['"""^$"""', 'review'], {}), "('^$', review)\n", (269, 283), False, 'from django.conf.urls import url, patterns, include\n'), ((290, 354), 'django.conf.urls.url', 'url', (['"""^login/$"""', '"""django.contrib.auth.views.login"""'], {'name': '"""login"""'}), "('^login/$', 'django.contrib.auth.views.login', name='login')\n", (293, 354), False, 'from django.conf.urls import url, patterns, include\n'), ((409, 433), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (416, 433), False, 'from django.conf.urls import url, patterns, include\n'), ((592, 622), 'django.conf.settings.MEDIA_URL.lstrip', 'settings.MEDIA_URL.lstrip', (['"""/"""'], {}), "('/')\n", (617, 622), False, 'from django.conf import settings\n'), ((729, 760), 'django.conf.settings.STATIC_URL.lstrip', 'settings.STATIC_URL.lstrip', (['"""/"""'], {}), "('/')\n", (755, 760), False, 'from django.conf import settings\n')]
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
chrome_driver_location = "E:\Development\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_location)
driver.get("https://tinder.com/app/recs")
base_window = driver.window_handles[0]
login = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div/div/header/div/div[2]/div[2]/button')
login.click()
time.sleep(5)
google = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[1]/div/div[3]/span/div[1]/div/button')
google.click()
time.sleep(5)
google_login_window = driver.window_handles[1]
driver.switch_to.window(google_login_window)
google_email = driver.find_element_by_xpath('/html/body/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div[1]/div/form/span/section/div/div/div[1]/div/div[1]/div/div[1]/input')
google_email.send_keys("<EMAIL>")
next_button_username = driver.find_element_by_xpath('//*[@id="identifierNext"]/div/button')
next_button_username.click()
time.sleep(5)
google_password = driver.find_element_by_xpath('//*[@id="password"]/div[1]/div/div[1]/input')
google_password.send_keys("<PASSWORD>")
next_button_password = driver.find_element_by_xpath('//*[@id="passwordNext"]/div/button')
next_button_password.click()
time.sleep(5)
driver.switch_to.window(base_window)
allow_location_button = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]')
allow_location_button.click()
not_interested = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[2]')
not_interested.click()
cookies = driver.find_element_by_xpath('//*[@id="content"]/div/div[2]/div/div/div[1]/button')
cookies.click()
time.sleep(5)
body = driver.find_element_by_xpath('//*[@id="Tinder"]/body')
for i in range(0, 101):
time.sleep(5)
body.send_keys(Keys.ARROW_RIGHT)
|
[
"selenium.webdriver.Chrome",
"time.sleep"
] |
[((159, 215), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'chrome_driver_location'}), '(executable_path=chrome_driver_location)\n', (175, 215), False, 'from selenium import webdriver\n'), ((446, 459), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (456, 459), False, 'import time\n'), ((590, 603), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (600, 603), False, 'import time\n'), ((1032, 1045), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1042, 1045), False, 'import time\n'), ((1301, 1314), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1311, 1314), False, 'import time\n'), ((1738, 1751), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1748, 1751), False, 'import time\n'), ((1844, 1857), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1854, 1857), False, 'import time\n')]
|
import sys
from src.bot import logger
from src.bot.messages import (
AVAILABLE_COMMANDS_MESSAGE,
GREETINGS_MESSAGE,
NON_INFORMED_CHANNEL_ID_MESSAGE,
SUBSCRIPTION_ERROR_MESSAGE,
SUBSCRIPTION_MESSAGE,
UNKNOWN_MESSAGE,
)
from src.bot.requester import subscribe_in_pubsubhubbub
from src.database.utils import save_channel, save_user, subscribe_user
from src.settings import TELEGRAM_TOKEN
from telegram.ext import (
CommandHandler,
Filters,
MessageHandler,
Updater,
CallbackContext,
)
from telegram import Update
def start_command(update: Update, context: CallbackContext):
"""As soon as the bot is started, the first command that by default the
user sends to it is '/start'. Here we define what will be answered,
which in this case, is a customized message with the name of the user
in question informing how he can interact with the bot.
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
context.bot.send_message(
chat_id=update.effective_chat.id,
text=GREETINGS_MESSAGE.format(update.effective_chat.username),
)
return None
def help_command(update: Update, context: CallbackContext) -> None:
"""To assist the user in teaching how he will use the bot, we have
specified this function that will give all the necessary
instructions to him.
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
context.bot.send_message(
chat_id=update.effective_chat.id, text=AVAILABLE_COMMANDS_MESSAGE
)
return None
def unknown_command(update: Update, context: CallbackContext) -> None:
"""Some users can write commands that are not handled by the bot. In order
not to make him anxious without knowing if something went right or not,
any command that is not mapped by the service will be answered with a
redirect to him using the command '/help'
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
context.bot.send_message(
chat_id=update.effective_chat.id, text=UNKNOWN_MESSAGE
)
return None
def subscribe_command(update: Update, context: CallbackContext) -> None:
"""This function is our "flagship". Basically this is where the user will
be able to subscribe to a channel to receive notifications for new videos.
Parameters
----------
update : Update
This object represents an incoming update.
context : CallbackContext
This is a context object passed to the callback called by
telegram.ext.Handler.
"""
try:
channel_id = context.args[0]
except IndexError:
context.bot.send_message(
chat_id=update.effective_chat.id,
text=NON_INFORMED_CHANNEL_ID_MESSAGE,
)
return None
logger.info("Channel subscription requested. Initializing processing.")
chat_id = update.effective_chat.id
save_user(chat_id)
status = subscribe_in_pubsubhubbub(channel_id)
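# A WebSub/PubSubHubbub hub replies with HTTP 202 (Accepted) when the subscription
# request has been accepted for asynchronous verification.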
if status == 202:
logger.info("Request sent successfully.")
save_channel(channel_id)
subscribe_user(channel_id, chat_id)
context.bot.send_message(
chat_id=update.effective_chat.id, text=SUBSCRIPTION_MESSAGE
)
else:
logger.warning(
f"There was a problem sending your subscribe request. Status Code received: {status}"
)
context.bot.send_message(
chat_id=update.effective_chat.id, text=SUBSCRIPTION_ERROR_MESSAGE
)
return None
def main() -> None:
"""This is where the bot will actually start and handle requests with
Telegram users.
"""
updater = Updater(token=TELEGRAM_TOKEN)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start_command))
dispatcher.add_handler(CommandHandler("help", help_command))
dispatcher.add_handler(CommandHandler("subscribe", subscribe_command))
dispatcher.add_handler(MessageHandler(Filters.command, unknown_command))
updater.start_polling()
updater.idle()
return None
if __name__ == "__main__":
logger.info("Initializing bot...")
try:
main()
except (KeyboardInterrupt, SystemExit):
logger.info("Stopping bot...")
sys.exit(0)
|
[
"src.bot.logger.warning",
"src.bot.requester.subscribe_in_pubsubhubbub",
"telegram.ext.Updater",
"src.bot.messages.GREETINGS_MESSAGE.format",
"src.database.utils.subscribe_user",
"telegram.ext.MessageHandler",
"src.database.utils.save_channel",
"src.bot.logger.info",
"telegram.ext.CommandHandler",
"src.database.utils.save_user",
"sys.exit"
] |
[((3290, 3361), 'src.bot.logger.info', 'logger.info', (['"""Channel subscription requested. Initializing processing."""'], {}), "('Channel subscription requested. Initializing processing.')\n", (3301, 3361), False, 'from src.bot import logger\n'), ((3406, 3424), 'src.database.utils.save_user', 'save_user', (['chat_id'], {}), '(chat_id)\n', (3415, 3424), False, 'from src.database.utils import save_channel, save_user, subscribe_user\n'), ((3438, 3475), 'src.bot.requester.subscribe_in_pubsubhubbub', 'subscribe_in_pubsubhubbub', (['channel_id'], {}), '(channel_id)\n', (3463, 3475), False, 'from src.bot.requester import subscribe_in_pubsubhubbub\n'), ((4161, 4190), 'telegram.ext.Updater', 'Updater', ([], {'token': 'TELEGRAM_TOKEN'}), '(token=TELEGRAM_TOKEN)\n', (4168, 4190), False, 'from telegram.ext import CommandHandler, Filters, MessageHandler, Updater, CallbackContext\n'), ((4609, 4643), 'src.bot.logger.info', 'logger.info', (['"""Initializing bot..."""'], {}), "('Initializing bot...')\n", (4620, 4643), False, 'from src.bot import logger\n'), ((3507, 3548), 'src.bot.logger.info', 'logger.info', (['"""Request sent successfully."""'], {}), "('Request sent successfully.')\n", (3518, 3548), False, 'from src.bot import logger\n'), ((3557, 3581), 'src.database.utils.save_channel', 'save_channel', (['channel_id'], {}), '(channel_id)\n', (3569, 3581), False, 'from src.database.utils import save_channel, save_user, subscribe_user\n'), ((3590, 3625), 'src.database.utils.subscribe_user', 'subscribe_user', (['channel_id', 'chat_id'], {}), '(channel_id, chat_id)\n', (3604, 3625), False, 'from src.database.utils import save_channel, save_user, subscribe_user\n'), ((3760, 3871), 'src.bot.logger.warning', 'logger.warning', (['f"""There was a problem sending your subscribe request. Status Code received: {status}"""'], {}), "(\n f'There was a problem sending your subscribe request. 
Status Code received: {status}'\n )\n", (3774, 3871), False, 'from src.bot import logger\n'), ((4255, 4293), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start_command'], {}), "('start', start_command)\n", (4269, 4293), False, 'from telegram.ext import CommandHandler, Filters, MessageHandler, Updater, CallbackContext\n'), ((4322, 4358), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""help"""', 'help_command'], {}), "('help', help_command)\n", (4336, 4358), False, 'from telegram.ext import CommandHandler, Filters, MessageHandler, Updater, CallbackContext\n'), ((4387, 4433), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""subscribe"""', 'subscribe_command'], {}), "('subscribe', subscribe_command)\n", (4401, 4433), False, 'from telegram.ext import CommandHandler, Filters, MessageHandler, Updater, CallbackContext\n'), ((4462, 4510), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.command', 'unknown_command'], {}), '(Filters.command, unknown_command)\n', (4476, 4510), False, 'from telegram.ext import CommandHandler, Filters, MessageHandler, Updater, CallbackContext\n'), ((1220, 1276), 'src.bot.messages.GREETINGS_MESSAGE.format', 'GREETINGS_MESSAGE.format', (['update.effective_chat.username'], {}), '(update.effective_chat.username)\n', (1244, 1276), False, 'from src.bot.messages import AVAILABLE_COMMANDS_MESSAGE, GREETINGS_MESSAGE, NON_INFORMED_CHANNEL_ID_MESSAGE, SUBSCRIPTION_ERROR_MESSAGE, SUBSCRIPTION_MESSAGE, UNKNOWN_MESSAGE\n'), ((4720, 4750), 'src.bot.logger.info', 'logger.info', (['"""Stopping bot..."""'], {}), "('Stopping bot...')\n", (4731, 4750), False, 'from src.bot import logger\n'), ((4759, 4770), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4767, 4770), False, 'import sys\n')]
|
import datetime
import os
import certifi
import urllib3
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
# Write to a file
def write_file(file_path, text_content):
with open(file_path, 'w', encoding='utf-8') as f:
f.write(datetime.datetime.now().strftime('<!-- [store time] %Y-%m-%d %H:%M:%S.%f -->\n'))
f.write(text_content)
# Read from a file
def read_file(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
file_content = f.read()
return file_content
# Check a file
def check_file(file_path, file_size=0):
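# True only for an existing regular file whose size exceeds file_size (interpreted in KiB).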
return os.path.exists(file_path) and os.path.isfile(file_path) and os.path.getsize(file_path) / 1024 > file_size
# Assemble the file name
def __make_up__(directory, name=None):
if name:
path = '%s%s.html' % (directory, name)
else:
path = '%s%s.html' % (directory, datetime.datetime.now().strftime('%Y-%m-%d-%H'))
return path
# Open the URL with a browser
def browser_html(html_uri, storage_directory=None, file_name=None):
if storage_directory and file_name:
file_path = __make_up__(storage_directory, file_name)
if check_file(file_path):
store_html = read_file(file_path)
else:
store_html = __browser__(html_uri)
write_file(file_path, store_html)
else:
store_html = __browser__(html_uri)
return store_html
# Simulate a browser visit
def __browser__(uri):
options = Options()
options.set_headless()
browser = webdriver.Firefox(options=options)
browser.maximize_window()
browser.get(uri)
html_content = browser.page_source
browser.quit()
return html_content
# Open the URL via an HTTP request
def request_html(html_uri, need_https=True, storage_directory=None, file_name=None):
if storage_directory and file_name:
file_path = __make_up__(storage_directory, file_name)
if check_file(file_path):
store_html = read_file(file_path)
else:
store_html = __request__(html_uri, need_https)
write_file(file_path, store_html)
else:
store_html = __request__(html_uri, need_https)
return store_html
# Perform the HTTP request
def __request__(uri, need_https=True):
if need_https:
html_http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
else:
html_http = urllib3.PoolManager()
html_response = html_http.request('GET', uri)
html_content = html_response.data.decode()
return html_content
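# Usage sketch (added; the URL and cache location are illustrative assumptions):
#   html = request_html('https://example.com', need_https=True,
#                       storage_directory='./cache/', file_name='example')
# The first call fetches the page and caches it as ./cache/example.html; later calls with the
# same directory/name re-read the cached copy instead of hitting the network again.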
|
[
"selenium.webdriver.Firefox",
"os.path.getsize",
"os.path.exists",
"datetime.datetime.now",
"os.path.isfile",
"urllib3.PoolManager",
"selenium.webdriver.firefox.options.Options",
"certifi.where"
] |
[((1456, 1465), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (1463, 1465), False, 'from selenium.webdriver.firefox.options import Options\n'), ((1509, 1543), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options'}), '(options=options)\n', (1526, 1543), False, 'from selenium import webdriver\n'), ((605, 630), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (619, 630), False, 'import os\n'), ((635, 660), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (649, 660), False, 'import os\n'), ((2409, 2430), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (2428, 2430), False, 'import urllib3\n'), ((665, 691), 'os.path.getsize', 'os.path.getsize', (['file_path'], {}), '(file_path)\n', (680, 691), False, 'import os\n'), ((2360, 2375), 'certifi.where', 'certifi.where', ([], {}), '()\n', (2373, 2375), False, 'import certifi\n'), ((275, 298), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (296, 298), False, 'import datetime\n'), ((879, 902), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (900, 902), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""Trello plugin for pomito."""
import logging
from trello import TrelloClient
from pomito.plugins import task
from pomito.task import Task
__all__ = ['TrelloTask']
logger = logging.getLogger('pomito.plugins.task.trello')
def _create_trello_client(api_key, api_secret):
"""Create default TrelloClient instance."""
return TrelloClient(api_key=api_key, api_secret=api_secret)
class TrelloTask(task.TaskPlugin):
"""Trello task plugin for pomito."""
def __init__(self, pomodoro_service, get_trello_api=_create_trello_client):
"""Create an instance of TrelloTask."""
if pomodoro_service is None:
raise ValueError("pomodoro_service must not be None.")
self._get_trello_client = get_trello_api
self._pomodoro_service = pomodoro_service
self.trello_api = None
self.trello_board = None
self.trello_list = None
def initialize(self):
"""Initialize the trello task plugin."""
def _get_config(config):
return self._pomodoro_service.get_config("task.trello", config)
api_key = _get_config("api_key")
api_secret = _get_config("api_secret")
self.trello_board = _get_config("board")
self.trello_list = _get_config("list")
self.trello_api = self._get_trello_client(api_key, api_secret)
if api_key is None or api_secret is None\
or self.trello_board is None or self.trello_list is None:
logger.error("Error initializing plugin: invalid configuration")
def get_tasks(self):
"""Get all incomplete tasks assigned to the user."""
# TODO support for dueDates
try:
def create_task(card):
"""Create a `Task` object from a trello dict."""
return Task(uid=card.id,
estimate=0,
actual=0,
tags=card.labels,
description=card.name)
for b in self.trello_api.list_boards():
if self.trello_board is not None and b.name != self.trello_board:
continue
if self.trello_list is not None:
lists = [lo for lo in b.list_lists() if lo.name == self.trello_list]
else:
lists = b.list_lists()
for l in lists:
yield from map(create_task, l.list_cards())
except AttributeError as attrib_error:
logger.error("Error getting tasklist: {0}".format(attrib_error))
|
[
"pomito.task.Task",
"trello.TrelloClient",
"logging.getLogger"
] |
[((203, 250), 'logging.getLogger', 'logging.getLogger', (['"""pomito.plugins.task.trello"""'], {}), "('pomito.plugins.task.trello')\n", (220, 250), False, 'import logging\n'), ((360, 412), 'trello.TrelloClient', 'TrelloClient', ([], {'api_key': 'api_key', 'api_secret': 'api_secret'}), '(api_key=api_key, api_secret=api_secret)\n', (372, 412), False, 'from trello import TrelloClient\n'), ((1822, 1907), 'pomito.task.Task', 'Task', ([], {'uid': 'card.id', 'estimate': '(0)', 'actual': '(0)', 'tags': 'card.labels', 'description': 'card.name'}), '(uid=card.id, estimate=0, actual=0, tags=card.labels, description=card.name\n )\n', (1826, 1907), False, 'from pomito.task import Task\n')]
|
"""
Unit tests
"""
from django.test import TestCase
from django.conf import settings
class BasicTests(TestCase):
def test_configuration(self):
"""
Test that the configuration is sane.
"""
self.assertTrue('ROLLBAR' in dir(settings),
msg='The ROLLBAR setting is not present.')
self.assertTrue(settings.ROLLBAR.get('access_token'),
msg='The ROLLBAR["access_token"] setting is blank.')
|
[
"django.conf.settings.ROLLBAR.get"
] |
[((348, 384), 'django.conf.settings.ROLLBAR.get', 'settings.ROLLBAR.get', (['"""access_token"""'], {}), "('access_token')\n", (368, 384), False, 'from django.conf import settings\n')]
|
import io
import json
import socket
from typing import Dict, Tuple, Union
import boto3
import pytest
import yaml
from botocore.client import BaseClient
from botocore.response import StreamingBody
from botocore.session import Session
from botocore.stub import Stubber
from pydantic import BaseModel, ValidationError
from pytest_mock import MockerFixture
from pydantic_appconfig import AppConfigHelper
class TestConfig(BaseModel):
"""Test pydantic parsing."""
__test__ = False
test_field_string: str
test_field_int: int
class Config:
"""The config, including title for the JSON schema."""
title = "TestConfig"
def test_config_returned_as_model(
appconfig_stub: Tuple[BaseClient, Stubber, Session],
mocker: MockerFixture,
) -> None:
"""Tests the config gets updated."""
client, stub, _ = appconfig_stub
stub.add_response(
"get_configuration",
_build_response(
{
"test_field_string": "testing_string",
"test_field_int": 42,
},
"1",
"application/json",
),
_build_request(),
)
mocker.patch.object(boto3, "client", return_value=client)
a: AppConfigHelper[TestConfig] = AppConfigHelper(
"AppConfig-App",
"AppConfig-Env",
"AppConfig-Profile",
15,
config_schema_model=TestConfig,
)
result = a.update_config()
assert result
assert a.config.test_field_string == "testing_string"
assert a.config.test_field_int == 42
assert a.config_version == "1"
def test_yaml_config_returned_as_model(
appconfig_stub: Tuple[BaseClient, Stubber, Session],
mocker: MockerFixture,
) -> None:
"""Tests the config gets updated."""
client, stub, _ = appconfig_stub
stub.add_response(
"get_configuration",
_build_response(
{
"test_field_string": "testing_string",
"test_field_int": 42,
},
"1",
"application/x-yaml",
),
_build_request(),
)
mocker.patch.object(boto3, "client", return_value=client)
a: AppConfigHelper[TestConfig] = AppConfigHelper(
"AppConfig-App",
"AppConfig-Env",
"AppConfig-Profile",
15,
config_schema_model=TestConfig,
)
result = a.update_config()
assert result
assert a.config.test_field_string == "testing_string"
assert a.config.test_field_int == 42
assert a.config_version == "1"
def test_config_model_parse_error(
appconfig_stub: Tuple[BaseClient, Stubber, Session], mocker: MockerFixture
) -> None:
"""Tests the config rejected."""
client, stub, _ = appconfig_stub
stub.add_response(
"get_configuration",
_build_response(
{
"xxx": "testing_string",
},
"1",
"application/json",
),
_build_request(),
)
mocker.patch.object(boto3, "client", return_value=client)
a: AppConfigHelper[TestConfig] = AppConfigHelper(
"AppConfig-App",
"AppConfig-Env",
"AppConfig-Profile",
15,
config_schema_model=TestConfig,
)
result = a.update_config()
assert result
with pytest.raises(ValidationError):
assert a.config.test_field_string
def _build_request(
app: str = "AppConfig-App",
env: str = "AppConfig-Env",
profile: str = "AppConfig-Profile",
client_id: str = None,
version: str = "null",
) -> Dict[str, str]:
if client_id is None:
client_id = socket.gethostname()
return {
"Application": app,
"ClientConfigurationVersion": str(version),
"ClientId": client_id,
"Configuration": profile,
"Environment": env,
}
def _build_response(
content: Union[Dict, str], version: str, content_type: str
) -> Dict[str, Union[str, StreamingBody]]:
if content_type == "application/json":
content_text = json.dumps(content).encode("utf-8")
elif content_type == "application/x-yaml":
content_text = str(yaml.dump(content)).encode("utf-8")
elif not isinstance(content, str):
raise ValueError("Unrecognised content.")
else:
content_text = content.encode("utf-8")
return {
"Content": StreamingBody(io.BytesIO(bytes(content_text)), len(content_text)),
"ConfigurationVersion": version,
"ContentType": content_type,
}
|
[
"pydantic_appconfig.AppConfigHelper",
"yaml.dump",
"json.dumps",
"socket.gethostname",
"pytest.raises"
] |
[((1252, 1362), 'pydantic_appconfig.AppConfigHelper', 'AppConfigHelper', (['"""AppConfig-App"""', '"""AppConfig-Env"""', '"""AppConfig-Profile"""', '(15)'], {'config_schema_model': 'TestConfig'}), "('AppConfig-App', 'AppConfig-Env', 'AppConfig-Profile', 15,\n config_schema_model=TestConfig)\n", (1267, 1362), False, 'from pydantic_appconfig import AppConfigHelper\n'), ((2196, 2306), 'pydantic_appconfig.AppConfigHelper', 'AppConfigHelper', (['"""AppConfig-App"""', '"""AppConfig-Env"""', '"""AppConfig-Profile"""', '(15)'], {'config_schema_model': 'TestConfig'}), "('AppConfig-App', 'AppConfig-Env', 'AppConfig-Profile', 15,\n config_schema_model=TestConfig)\n", (2211, 2306), False, 'from pydantic_appconfig import AppConfigHelper\n'), ((3072, 3182), 'pydantic_appconfig.AppConfigHelper', 'AppConfigHelper', (['"""AppConfig-App"""', '"""AppConfig-Env"""', '"""AppConfig-Profile"""', '(15)'], {'config_schema_model': 'TestConfig'}), "('AppConfig-App', 'AppConfig-Env', 'AppConfig-Profile', 15,\n config_schema_model=TestConfig)\n", (3087, 3182), False, 'from pydantic_appconfig import AppConfigHelper\n'), ((3284, 3314), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (3297, 3314), False, 'import pytest\n'), ((3605, 3625), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3623, 3625), False, 'import socket\n'), ((4013, 4032), 'json.dumps', 'json.dumps', (['content'], {}), '(content)\n', (4023, 4032), False, 'import json\n'), ((4123, 4141), 'yaml.dump', 'yaml.dump', (['content'], {}), '(content)\n', (4132, 4141), False, 'import yaml\n')]
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import FlightViewSet,TicketViewSet
router = DefaultRouter()
router.register('ticket', TicketViewSet)
router.register('flight', FlightViewSet)
urlpatterns = [
path('', include(router.urls))
]
|
[
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((147, 162), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (160, 162), False, 'from rest_framework.routers import DefaultRouter\n'), ((276, 296), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (283, 296), False, 'from django.urls import path, include\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz.save as tikz_save
import math
def derivative(y, h, n: int=1):
if n == 1:
return lambda x: (y(x + h) - y(x - h)) / (2 * h)
else:
return derivative(derivative(y, h, n - 1), h, 1)
def integral(y, h, a, b):
ret = 0
sgn = 1
if a > b:
sgn = -1
a, b = b, a
if abs(b - a) < h:
h *= abs(b - a)
for i in np.arange(a, b, h):
ret += y(i) * h
return ret * sgn
def fourier(y, h, n, a, b):
L = (b - a) / 2
a_0 = integral(y, h, a, b) / (2 * L)
a_n = [0] * n
b_n = [0] * n
for i in range(1, n + 1):
a_n[i - 1] = (1 / L) * integral(lambda x: y(x) * np.cos(i * np.pi * x / L), h, a, b)
b_n[i - 1] = (1 / L) * integral(lambda x: y(x) * np.sin(i * np.pi * x / L), h, a, b)
return lambda x: fouriereval(x, a_0, a_n, b_n, L)
def fouriereval(x, a_0, a_n, b_n, l):
ret = a_0
for i in range(1, len(a_n) + 1):
ret += a_n[i - 1] * np.cos(i * np.pi * x / l)
ret += b_n[i - 1] * np.sin(i * np.pi * x / l)
return ret
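# Quick sanity check (added): for the odd square wave on [-1, 1] the Fourier series is
# (4/pi) * sum over odd n of sin(n*pi*x)/n, so the partial sum below should come out close
# to 1 at x = 0.5 once enough terms are kept.
def _square_wave(x):
    return -1.0 if x < 0 else 1.0
_fr_square = fourier(_square_wave, 0.001, 25, -1, 1)
# print(_fr_square(0.5))  # expected to be roughly 1.0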
# def f(x):
# if x > 2:
# return f(x - 4)
# if x < -2:
# return f(x + 4)
# return ((x**3) - 4 * x) / 4
# def f(x):
# if x < -1:
# return f(x + 2)
# if x > 1:
# return f(x - 2)
# return -1 if x < 0 else 1
def fx(x, n):
if n == 1:
return np.sin(x)
return fx(np.sin(x) * np.pi / 2, n - 1)
# def f(x):
# return np.cos(np.tan(np.sin(x)))
def sirc(x):
return np.sqrt(1 - x**2)
def f(x):
if x < -2:
return f(x + 4)
if x > 2:
return f(x - 4)
if x < 0:
return -sirc(x + 1)
else:
return sirc(x - 1)
h = 0.001
x = np.arange(-4, 4, 0.01)
# kr = lambda x: derivative(f, h, 2)(x) / ((1 + derivative(f, h)(x)**2)**(3 / 2))
# dkr = derivative(kr, h)
# dy = derivative(f, h)
fr = fourier(f, h, 101, -2, 2)
plt.plot(x, np.vectorize(f)(x))
# plt.plot(x, np.vectorize(kr)(x))
# plt.plot(x, np.vectorize(dkr)(x))
# plt.plot(x, np.vectorize(dy)(x))
plt.plot(x, np.vectorize(fr)(x))
plt.axis([-4, 4, -5, 5])
plt.title("$f(x)$")
plt.grid(True)
tikz_save("PyPlotTesting/Figurer/" + "f" + str(1) + ".tikz", figureheight='\\figureheight', figurewidth='\\figurewidth')
|
[
"matplotlib.pyplot.title",
"numpy.vectorize",
"matplotlib.pyplot.axis",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] |
[((1563, 1585), 'numpy.arange', 'np.arange', (['(-4)', '(4)', '(0.01)'], {}), '(-4, 4, 0.01)\n', (1572, 1585), True, 'import numpy as np\n'), ((1926, 1950), 'matplotlib.pyplot.axis', 'plt.axis', (['[-4, 4, -5, 5]'], {}), '([-4, 4, -5, 5])\n', (1934, 1950), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1971), 'matplotlib.pyplot.title', 'plt.title', (['"""$f(x)$"""'], {}), "('$f(x)$')\n", (1961, 1971), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1987), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1981, 1987), True, 'import matplotlib.pyplot as plt\n'), ((394, 412), 'numpy.arange', 'np.arange', (['a', 'b', 'h'], {}), '(a, b, h)\n', (403, 412), True, 'import numpy as np\n'), ((1396, 1415), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (1403, 1415), True, 'import numpy as np\n'), ((1273, 1282), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1279, 1282), True, 'import numpy as np\n'), ((1766, 1781), 'numpy.vectorize', 'np.vectorize', (['f'], {}), '(f)\n', (1778, 1781), True, 'import numpy as np\n'), ((1904, 1920), 'numpy.vectorize', 'np.vectorize', (['fr'], {}), '(fr)\n', (1916, 1920), True, 'import numpy as np\n'), ((927, 952), 'numpy.cos', 'np.cos', (['(i * np.pi * x / l)'], {}), '(i * np.pi * x / l)\n', (933, 952), True, 'import numpy as np\n'), ((975, 1000), 'numpy.sin', 'np.sin', (['(i * np.pi * x / l)'], {}), '(i * np.pi * x / l)\n', (981, 1000), True, 'import numpy as np\n'), ((1294, 1303), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1300, 1303), True, 'import numpy as np\n'), ((644, 669), 'numpy.cos', 'np.cos', (['(i * np.pi * x / L)'], {}), '(i * np.pi * x / L)\n', (650, 669), True, 'import numpy as np\n'), ((731, 756), 'numpy.sin', 'np.sin', (['(i * np.pi * x / L)'], {}), '(i * np.pi * x / L)\n', (737, 756), True, 'import numpy as np\n')]
|
"""Unit tests for the Trello issues collector."""
from datetime import datetime
from .base import TrelloTestCase
class TrelloIssuesTest(TrelloTestCase):
"""Unit tests for the Trello issues collector."""
METRIC_TYPE = "issues"
async def test_issues(self):
"""Test that the number of issues and the individual issues are returned."""
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="2", entities=self.entities)
async def test_issues_with_ignored_list(self):
"""Test that lists can be ignored when counting issues."""
self.set_source_parameter("lists_to_ignore", ["list1"])
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="1", entities=[self.entities[1]])
async def test_overdue_issues(self):
"""Test overdue issues."""
self.set_source_parameter("cards_to_count", ["overdue"])
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="1", entities=[self.entities[1]])
async def test_inactive_issues(self):
"""Test inactive issues."""
self.set_source_parameter("cards_to_count", ["inactive"])
self.cards["cards"][0]["dateLastActivity"] = datetime.now().isoformat()
response = await self.collect(get_request_json_side_effect=self.json)
self.assert_measurement(response, value="1", entities=[self.entities[1]])
|
[
"datetime.datetime.now"
] |
[((1356, 1370), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1368, 1370), False, 'from datetime import datetime\n')]
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
#hello_constant = tf.constant('Hello World!')
#with tf.Session() as sess:
# output = sess.run(hello_constant)
# print(output)
# this is how you create an input field in Tensorflow
# not the tf.string part but the tf.placeholder part
# the tf.string part is just the type of placeholder that this specific case is
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)
with tf.Session() as sess:
#output = sess.run(x, feed_dict={x: 'Hello World' })
output = sess.run([x, y, z], feed_dict={x: 'Test String', y:123, z: 45.67})
print(output)
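# Note: tf.placeholder and tf.Session are TensorFlow 1.x APIs; running this snippet on
# TensorFlow 2.x would require the tf.compat.v1 compatibility module (an assumption
# about the runtime, not part of the original example).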
|
[
"tensorflow.placeholder",
"tensorflow.Session"
] |
[((400, 425), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {}), '(tf.string)\n', (414, 425), True, 'import tensorflow as tf\n'), ((430, 454), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (444, 454), True, 'import tensorflow as tf\n'), ((459, 485), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (473, 485), True, 'import tensorflow as tf\n'), ((493, 505), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (503, 505), True, 'import tensorflow as tf\n')]
|
"""Test the lambda assigning visitor."""
import ast
import pytest
from lime_lynter.Violations.correctness import LambdaAssigningViolation
from lime_lynter.Visitors.Correctness.correctness import LambdaAssigningVisitor
lambda_assigning = """
f = lambda x: 2 * x
"""
@pytest.mark.parametrize('code', [lambda_assigning])
def test_lambda_assigning(
code,
):
"""
Test lambda assigning.
Args:
code: Sample code.
"""
tree = ast.parse(code)
visitor = LambdaAssigningVisitor()
visitor.run(tree)
for violation in visitor.violations:
        assert isinstance(violation, LambdaAssigningViolation)
assert len(visitor.violations) == 1
|
[
"pytest.mark.parametrize",
"ast.parse",
"lime_lynter.Visitors.Correctness.correctness.LambdaAssigningVisitor"
] |
[((271, 322), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code"""', '[lambda_assigning]'], {}), "('code', [lambda_assigning])\n", (294, 322), False, 'import pytest\n'), ((455, 470), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (464, 470), False, 'import ast\n'), ((485, 509), 'lime_lynter.Visitors.Correctness.correctness.LambdaAssigningVisitor', 'LambdaAssigningVisitor', ([], {}), '()\n', (507, 509), False, 'from lime_lynter.Visitors.Correctness.correctness import LambdaAssigningVisitor\n')]
|
# -*- coding: utf-8 -*-
import re
def minify(code: str) -> str:
# Mark end of directives
code = re.sub(r"([^\S\n]*#[^\n]+)", "%NEWLINE%\\1%NEWLINE%", code)
# Remove comments
code = re.sub(r"//[^\n]*", "", code)
code = re.sub(r"/\*(?:\*[^/]|[^*])*\*/", "", code)
# Remove newlines
code = re.sub(r"\n+", " ", code)
# Collapse whitespace
code = re.sub(r"\s{2,}", " ", code)
for c in r"+-*%/!~|&=$<>[]{}().:,;?":
code = re.sub(f"\\s*\{c}\\s*", c, code)
# Add in newlines for directives
code = code.replace("%NEWLINE%", "\n")
code = re.sub(r"\n\s+", "\n", code)
return code.strip()
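# Minimal usage sketch (the input snippet below is hypothetical); guarded so importing
# this module stays side-effect free:
if __name__ == "__main__":
    sample = (
        "#include <stdio.h>\n"
        "int main() {\n"
        "    /* greet the user */\n"
        "    printf(\"hi\"); // done\n"
        "    return 0;\n"
        "}\n"
    )
    print(minify(sample))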
|
[
"re.sub"
] |
[((105, 166), 're.sub', 're.sub', (['"""([^\\\\S\\\\n]*#[^\\\\n]+)"""', '"""%NEWLINE%\\\\1%NEWLINE%"""', 'code'], {}), "('([^\\\\S\\\\n]*#[^\\\\n]+)', '%NEWLINE%\\\\1%NEWLINE%', code)\n", (111, 166), False, 'import re\n'), ((199, 228), 're.sub', 're.sub', (['"""//[^\\\\n]*"""', '""""""', 'code'], {}), "('//[^\\\\n]*', '', code)\n", (205, 228), False, 'import re\n'), ((240, 285), 're.sub', 're.sub', (['"""/\\\\*(?:\\\\*[^/]|[^*])*\\\\*/"""', '""""""', 'code'], {}), "('/\\\\*(?:\\\\*[^/]|[^*])*\\\\*/', '', code)\n", (246, 285), False, 'import re\n'), ((318, 343), 're.sub', 're.sub', (['"""\\\\n+"""', '""" """', 'code'], {}), "('\\\\n+', ' ', code)\n", (324, 343), False, 'import re\n'), ((382, 410), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'code'], {}), "('\\\\s{2,}', ' ', code)\n", (388, 410), False, 'import re\n'), ((593, 622), 're.sub', 're.sub', (['"""\\\\n\\\\s+"""', '"""\n"""', 'code'], {}), "('\\\\n\\\\s+', '\\n', code)\n", (599, 622), False, 'import re\n'), ((468, 501), 're.sub', 're.sub', (['f"""\\\\s*\\\\{c}\\\\s*"""', 'c', 'code'], {}), "(f'\\\\s*\\\\{c}\\\\s*', c, code)\n", (474, 501), False, 'import re\n')]
|
# Generated by Django 2.0 on 2020-05-02 10:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0008_variation'),
]
operations = [
migrations.AddField(
model_name='variation',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='store.Product'),
),
migrations.AddField(
model_name='variation',
name='price',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=100, null=True),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.DecimalField"
] |
[((356, 489), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""images"""', 'to': '"""store.Product"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='images', to='store.Product')\n", (373, 489), False, 'from django.db import migrations, models\n'), ((606, 682), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(100)', 'null': '(True)'}), '(blank=True, decimal_places=2, max_digits=100, null=True)\n', (625, 682), False, 'from django.db import migrations, models\n')]
|
from django.db import models
import hashlib
from django.core.validators import RegexValidator
from inventario.models import DetallesProducto
class Cliente(models.Model):
TIPO_DOC = {
('PAS','Pasaporte'),
('CC','Cedula de Ciudadania'),
('TI','Tarjeta de Identidad'),
}
nombre = models.CharField(max_length=128, unique=True, primary_key=True)
clave = models.CharField(max_length=128, editable=True)
fechaNacimiento = models.DateField()
direccion = models.CharField(max_length=32)
telefono_regex = RegexValidator(regex=r'^\+?1?\d{7,10}$', message="El telefono debe tener formato: '+7777777'. Up to 10 digits allowed.")
telefono = models.CharField(validators=[telefono_regex], max_length=12, blank=True) # validators should be a list
tipoDocumento = models.CharField(max_length=3, choices = TIPO_DOC)
numeroDocumento = models.IntegerField()
    # super().save(*args, **kwargs) to save into this table
def save(self, *args, **kwargs):
self.clave = hashlib.md5(self.clave.encode('utf-8')).hexdigest()
super(Cliente, self).save(*args, **kwargs)
def autenticarCliente(self, *args, **kwargs):
auth = Cliente.objects.filter(nombre=self.nombre,
clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest()).exists()
return auth
def buscarCliente(self, *args, **kwargs):
aux = Cliente.objects.filter(nombre=self.nombre,
clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest())
return aux
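# Hypothetical usage sketch (assumes migrations are applied and Django is configured;
# the field values below are illustrative only):
#   c = Cliente(nombre="ana", clave="secreto", fechaNacimiento="1990-01-01",
#               direccion="Calle 1", tipoDocumento="CC", numeroDocumento=123)
#   c.save()                                            # clave is stored as its md5 hexdigest
#   Cliente(nombre="ana", clave="secreto").autenticarCliente()  # -> True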
class AdministradorDuenio (models.Model):
TIPO = {
('ADMIN','Administrador'),
('CEO','Duenio'),
}
pkAdministradorDuenio = models.AutoField(primary_key=True)
nombreUsuario = models.CharField(max_length=128, unique=True)
clave = models.CharField(max_length=128, editable=True)
tipo = models.CharField(max_length=5, choices=TIPO)
    # super().save(*args, **kwargs) to save into this table
def save(self, *args, **kwargs):
self.clave = hashlib.md5(self.clave.encode('utf-8')).hexdigest()
super(AdministradorDuenio, self).save(*args, **kwargs)
def autenticarAdmin(self, *args, **kwargs):
auth = AdministradorDuenio.objects.filter(nombreUsuario=self.nombreUsuario, clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest(), tipo='ADMIN').exists()
return auth
def autenticarDuenio(self, *args, **kwargs):
auth = AdministradorDuenio.objects.filter(nombreUsuario=self.nombreUsuario, clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest(), tipo='CEO').exists()
return auth
#ProductosEnCarrito
class Carrito(models.Model):
pkCarrito = models.AutoField(primary_key=True)
fkNombreCliente = models.ForeignKey(Cliente, on_delete=models.SET_NULL, null=True)
fkDetalleProducto = models.ForeignKey(DetallesProducto, on_delete=models.CASCADE)
cantidad = models.IntegerField()
precioActual = models.FloatField()
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.AutoField",
"django.db.models.IntegerField",
"django.core.validators.RegexValidator",
"django.db.models.DateField"
] |
[((318, 381), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'unique': '(True)', 'primary_key': '(True)'}), '(max_length=128, unique=True, primary_key=True)\n', (334, 381), False, 'from django.db import models\n'), ((394, 441), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'editable': '(True)'}), '(max_length=128, editable=True)\n', (410, 441), False, 'from django.db import models\n'), ((464, 482), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (480, 482), False, 'from django.db import models\n'), ((499, 530), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (515, 530), False, 'from django.db import models\n'), ((553, 679), 'django.core.validators.RegexValidator', 'RegexValidator', ([], {'regex': '"""^\\\\+?1?\\\\d{7,10}$"""', 'message': '"""El telefono debe tener formato: \'+7777777\'. Up to 10 digits allowed."""'}), '(regex=\'^\\\\+?1?\\\\d{7,10}$\', message=\n "El telefono debe tener formato: \'+7777777\'. Up to 10 digits allowed.")\n', (567, 679), False, 'from django.core.validators import RegexValidator\n'), ((689, 761), 'django.db.models.CharField', 'models.CharField', ([], {'validators': '[telefono_regex]', 'max_length': '(12)', 'blank': '(True)'}), '(validators=[telefono_regex], max_length=12, blank=True)\n', (705, 761), False, 'from django.db import models\n'), ((812, 860), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'choices': 'TIPO_DOC'}), '(max_length=3, choices=TIPO_DOC)\n', (828, 860), False, 'from django.db import models\n'), ((885, 906), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (904, 906), False, 'from django.db import models\n'), ((1733, 1767), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1749, 1767), False, 'from django.db import models\n'), ((1788, 1833), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'unique': '(True)'}), '(max_length=128, unique=True)\n', (1804, 1833), False, 'from django.db import models\n'), ((1846, 1893), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'editable': '(True)'}), '(max_length=128, editable=True)\n', (1862, 1893), False, 'from django.db import models\n'), ((1905, 1949), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5)', 'choices': 'TIPO'}), '(max_length=5, choices=TIPO)\n', (1921, 1949), False, 'from django.db import models\n'), ((2730, 2764), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2746, 2764), False, 'from django.db import models\n'), ((2787, 2851), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cliente'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Cliente, on_delete=models.SET_NULL, null=True)\n', (2804, 2851), False, 'from django.db import models\n'), ((2877, 2938), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DetallesProducto'], {'on_delete': 'models.CASCADE'}), '(DetallesProducto, on_delete=models.CASCADE)\n', (2894, 2938), False, 'from django.db import models\n'), ((2954, 2975), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2973, 2975), False, 'from django.db import models\n'), ((2995, 3014), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3012, 3014), False, 'from django.db import models\n')]
|
import experiments
import stages
from nose.tools import assert_equals
import factory
rows = None
def setup_module():
global rows
data = factory.Stages()
rows = data.indexed_rows()
class TestExperiments:
@classmethod
def setup_class(cls):
stgs = [stages.stage_from(r) for r in rows]
exps = experiments.experiments_from(stgs)
cls.experiments = {}
for e in exps:
cls.experiments[e.experiment_id()] = e
def test_complete(self):
exp = self.experiments['a7e32988-9e4b-4a2d-bbb2-a35798e7e8f1']
assert_equals(exp.has_stage('timeline'), True)
assert_equals(type(exp.get_stage('timeline')), stages.Timeline)
assert_equals(exp.time_start(), 1410966188680)
assert_equals(exp.time_duration(), 556956)
assert_equals(exp.num_stages(), 8)
#assert_equals(exp.experiment_id(), 'a7e32988-9e4b-4a2d-bbb2-a35798e7e8f1')
assert_equals(exp.size_in_bytes(), 131271)
assert_equals(exp.is_complete(), True)
def test_incomplete(self):
exp = self.experiments['c16705a3-64c1-43e5-b70f-24a769dced32']
assert_equals(exp.has_stage('timeline'), False)
assert_equals(exp.num_stages(), 6)
#assert_equals(exp.experiment_id(), 'c16705a3-64c1-43e5-b70f-24a769dced32')
assert_equals(exp.is_complete(), False)
|
[
"stages.stage_from",
"factory.Stages",
"experiments.experiments_from"
] |
[((147, 163), 'factory.Stages', 'factory.Stages', ([], {}), '()\n', (161, 163), False, 'import factory\n'), ((330, 364), 'experiments.experiments_from', 'experiments.experiments_from', (['stgs'], {}), '(stgs)\n', (358, 364), False, 'import experiments\n'), ((279, 299), 'stages.stage_from', 'stages.stage_from', (['r'], {}), '(r)\n', (296, 299), False, 'import stages\n')]
|
import regex as re
import requests
from time import sleep
from digi.xbee.devices import XBeeDevice, RemoteXBeeDevice, XBee64BitAddress
from digi.xbee.exception import TimeoutException
from datetime import datetime
class MSG_TYPES:
ACKN = 0
SYNC = 1
UPDA = 2
SYNACK = 3
class UpdatePayload:
lightIntensity = 0
temperature = 0
batteryLevel = 0
rssiToGateway = 0
motionDetected = 0
class AckPayload:
seqNumToAck = 0
class SynAckPayload:
nodeId = 0
utcSec = ""
defaultSleep = 0
class HMSFrame:
seqNum = 0
nodeId = 0
srcAddr = 0
dstAddr = 0
msgType = 0
payloadLen = 0
payload = ""
cksum = 0
class HMSGateway():
SENSOR_NODE_ID = "SENSOR_NODE"
SENSOR_NODE_ADDR = "0013A200416B4BA2"
#SENSOR_NODE_ADDR = "0000000000000001"
nodeUrl = "http://127.0.0.1:8000/rest/node/"
dataUrl = "http://127.0.0.1:8000/rest/data/"
defaultSleep = 30
ACKS = []
LAST_UPDA = []
lastSyncedAt = []
src_node = None
sequenceNum = 0
nodeID = 0
nodeAddr = 0
SYNC_IN_PROGRESS = False
NODE_ID_WITH_ADDRESS = []
def postNodeInfo(self, nodeID, rssi, motionDetected):
postData = {
"nodeId": nodeID,
"rssi": rssi,
"motionDetected": motionDetected,
"updated_at": "{}".format(datetime.now())
}
requests.post(self.nodeUrl, data = postData)
def postNodeData(self, nodeID, updatePayload):
postData = {
"fromNodeID": nodeID,
"lightIntensity": updatePayload.lightIntensity,
"temperature": updatePayload.temperature,
"batteryLevel": updatePayload.batteryLevel
}
requests.post(self.dataUrl, data = postData)
def encode_hms_frame(self, txFrame):
txFrame.payloadLen, txFrame.payload = self.encode_hmsframe_payload(txFrame)
frameAsStr = ''.join((
str(txFrame.seqNum) + ";",
str(txFrame.nodeId) + ";",
str(txFrame.srcAddr) + ";",
str(txFrame.dstAddr) + ";",
str(txFrame.msgType) + ";",
str(txFrame.payloadLen) + ";",
str(txFrame.payload) + ";",
str(txFrame.cksum) + ";",
))
print(frameAsStr)
return bytearray(frameAsStr, 'utf-8')
def decode_hms_frame(self, rxMsg):
frameData = rxMsg.split(";")
if len(frameData) != 9:
return None
rxFrame = HMSFrame()
rxFrame.seqNum = int(frameData[0])
rxFrame.nodeId = int(frameData[1])
rxFrame.srcAddr = int(frameData[2])
rxFrame.dstAddr = int(frameData[3])
rxFrame.msgType = int(frameData[4])
rxFrame.payloadLen = int(frameData[5])
rxFrame.payload = frameData[6]
rxFrame.cksum = int(frameData[7])
# check cksum
rxFrame.payload = self.decode_hmsframe_payload(rxFrame)
return rxFrame
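    # Wire format shared by encode_hms_frame/decode_hms_frame: eight ';'-terminated
    # fields, so splitting on ';' yields the 9 items checked above (the last one empty):
    #   seqNum;nodeId;srcAddr;dstAddr;msgType;payloadLen;payload;cksum;
    # e.g. an ACK for frame 7 sent to address 3 could serialize as "12;0;0;3;0;2;7|;0;"
    # (the concrete numbers are illustrative only).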
def encode_hmsframe_payload(self, txFrame):
if txFrame.payload == "":
print("No payload in frame")
return 0, ""
if txFrame.msgType == MSG_TYPES.ACKN:
print("ACK payload")
ackPayloadAsStr = str(txFrame.payload.seqNumToAck) + "|"
return len(ackPayloadAsStr), ackPayloadAsStr
elif txFrame.msgType == MSG_TYPES.SYNACK:
print("SYNACK payload")
synAckPayloadAsStr = ''.join((
str(txFrame.payload.nodeId) + "|",
str(txFrame.payload.utcSec) + "|",
str(txFrame.payload.defaultSleep) + "|",
))
return len(synAckPayloadAsStr), synAckPayloadAsStr
else:
print("Payload not known")
return 0, ""
def decode_hmsframe_payload(self, rxFrame):
if rxFrame.payloadLen == 0:
return ""
payload = rxFrame.payload.split("|")
if rxFrame.msgType == MSG_TYPES.ACKN:
if len(payload) != 2:
return ""
acknPayload = AckPayload()
acknPayload.seqNumToAck = int(payload[0])
return acknPayload
elif rxFrame.msgType == MSG_TYPES.UPDA:
if len(payload) != 6:
return ""
print("Updating")
updatePayload = UpdatePayload()
updatePayload.lightIntensity = int(payload[0])
updatePayload.temperature = int(payload[1])
updatePayload.batteryLevel = int(payload[2])
updatePayload.rssiToGateway = int(payload[3])
updatePayload.motionDetected = int(payload[4])
return updatePayload
elif rxFrame.msgType == MSG_TYPES.SYNC:
return ""
else:
print("Unknown msg type to decode")
return ""
def process_received_frame(self, rxFrame):
if rxFrame.dstAddr == 0:
if rxFrame.msgType == MSG_TYPES.ACKN and rxFrame.payload != "":
self.ACKS.append(rxFrame.payload.seqNumToAck)
print("ACK RECEVIED")
elif rxFrame.msgType == MSG_TYPES.SYNC:
print("SYNC RECEVIED")
self.handle_sync_request(rxFrame)
elif rxFrame.msgType == MSG_TYPES.UPDA:
print("UPDA RECEVIED")
if rxFrame.nodeId != self.getNextSensorIdOrSync(rxFrame)[1]:
self.NODE_ID_WITH_ADDRESS = [item for item in self.NODE_ID_WITH_ADDRESS if item[1] != rxFrame.srcAddr]
self.handle_sync_request(rxFrame)
else:
if self.store_node_sync_if_needed(rxFrame) == True:
self.handle_sync_request(rxFrame)
else:
txFrame = HMSFrame()
txFrame.msgType = MSG_TYPES.ACKN
txFrame.dstAddr = rxFrame.srcAddr
acknPayload = AckPayload()
acknPayload.seqNumToAck = rxFrame.seqNum
txFrame.payload = acknPayload
print("SENDING ACK")
self.send_HMS_Frame(txFrame)
sleep(0.2)
current = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
nodeNotFound = True
for i in range(0, len(self.LAST_UPDA)):
if self.LAST_UPDA[i][0] == rxFrame.nodeId:
nodeNotFound = False
if self.LAST_UPDA[i][1] < current - self.defaultSleep:
self.LAST_UPDA[i] = (rxFrame.nodeId, current)
self.postNodeData(rxFrame.nodeId, rxFrame.payload)
self.postNodeInfo(rxFrame.nodeId, rxFrame.payload.rssiToGateway, rxFrame.payload.motionDetected)
if nodeNotFound == True:
self.LAST_UPDA.append((rxFrame.nodeId, current))
self.postNodeData(rxFrame.nodeId, rxFrame.payload)
self.postNodeInfo(rxFrame.nodeId, rxFrame.payload.rssiToGateway, rxFrame.payload.motionDetected)
elif rxFrame.msgType == MSG_TYPES.SYNACK:
print("SYNACK RECEVIED")
else:
print("Msg not for Gateway")
def store_node_sync_if_needed(self, rxFrame):
nodeNotFound = True
syncNode = False
current = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
for i in range(0, len(self.lastSyncedAt)):
if self.lastSyncedAt[i][0] == rxFrame.nodeId and self.lastSyncedAt[i][1] < (current - 600):
self.lastSyncedAt[i] = (rxFrame.nodeId, current)
nodeNotFound = False
syncNode = True
if nodeNotFound == True:
self.lastSyncedAt.append((rxFrame.nodeId, current))
return syncNode
def send_HMS_Frame(self, txFrame):
txFrame.nodeId = self.nodeID
txFrame.seqNum = self.sequenceNum
txFrame.cksum = 0
txFrame.srcAddr = self.nodeAddr
encodedFrame = self.encode_hms_frame(txFrame)
self.src_node.set_sync_ops_timeout(0.8)
for i in range(0, 5):
try:
self.src_node.send_data_broadcast(encodedFrame)
except Exception as e:
pass
self.sequenceNum += 1
return txFrame.seqNum
def handle_sync_request(self, rxFrame):
self.SYNC_IN_PROGRESS = True
txFrame = HMSFrame()
txFrame.msgType = MSG_TYPES.SYNACK
txFrame.dstAddr = rxFrame.srcAddr
synAckPayload = SynAckPayload()
synAckPayload.nodeId = self.getNextSensorIdOrSync(rxFrame)[1]
now = datetime.now()
synAckPayload.utcSec = now.strftime("%y:%m:%d:0%w:%H:%M:%S")
synAckPayload.defaultSleep = self.defaultSleep
txFrame.payload = synAckPayload
self.send_frame_and_wait_for_ack(txFrame, synAckPayload)
def getNextSensorIdOrSync(self, rxFrame):
for item in self.NODE_ID_WITH_ADDRESS:
if item[1] == rxFrame.srcAddr:
return True, item[0]
maxNodeId = len(self.NODE_ID_WITH_ADDRESS) + 1
self.NODE_ID_WITH_ADDRESS.append((maxNodeId, rxFrame.srcAddr))
return False, maxNodeId
def data_receive_callback(self, frame):
if frame is not None:
rx_data = frame.data.decode(errors='replace')
if rx_data != "":
rxMsg = rx_data.split("STR:")[1]
if rxMsg != "":
rxMsg = rxMsg.replace("#", "")
print(rxMsg)
hmsFrame = self.decode_hms_frame(rxMsg)
self.process_received_frame(hmsFrame)
def send_frame_and_wait_for_ack(self, txFrame, payload, waitForAck=False):
max_retries = 5
num_retry = 0
while(num_retry < max_retries):
seqNumToAck = self.send_HMS_Frame(txFrame)
sleep(1)
if seqNumToAck in self.ACKS:
self.ACKS.remove(seqNumToAck)
break
num_retry += 1
            # encode_hms_frame replaces txFrame.payload with its serialized string,
            # so restore the original payload object before the next retry.
            txFrame.payload = payload
            print("RETRYING - NO ACK RECEIVED")
def init_and_open_xbee_device(self):
serialPort = input("Serial Port [COM4]: ")
if serialPort == "":
serialPort = "COM4"
bdrate = input("Baudrate [115200]: ")
if bdrate == "":
bdrate = 115200
else:
bdrate = int(bdrate)
try:
self.src_node = XBeeDevice(serialPort, bdrate)
self.src_node.open()
return True
except Exception as e:
pass
        # Reaching this point means opening the device failed.
        return False
####################################
def runApp(self):
print("\n\n### HOME MONITORING SYSTEM - GATEWAY ###\n\n")
ret = self.init_and_open_xbee_device()
if not ret:
print("Initialization failed -> check log\n")
print("XBEE Device initialized\n")
self.src_node.add_data_received_callback(self.data_receive_callback)
print("# CALLBACK ADDED #\n")
while(1):
sleep(1)
|
[
"time.sleep",
"datetime.datetime",
"datetime.datetime.utcnow",
"digi.xbee.devices.XBeeDevice",
"requests.post",
"datetime.datetime.now"
] |
[((1391, 1433), 'requests.post', 'requests.post', (['self.nodeUrl'], {'data': 'postData'}), '(self.nodeUrl, data=postData)\n', (1404, 1433), False, 'import requests\n'), ((1731, 1773), 'requests.post', 'requests.post', (['self.dataUrl'], {'data': 'postData'}), '(self.dataUrl, data=postData)\n', (1744, 1773), False, 'import requests\n'), ((8902, 8916), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8914, 8916), False, 'from datetime import datetime\n'), ((10179, 10187), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (10184, 10187), False, 'from time import sleep\n'), ((10771, 10801), 'digi.xbee.devices.XBeeDevice', 'XBeeDevice', (['serialPort', 'bdrate'], {}), '(serialPort, bdrate)\n', (10781, 10801), False, 'from digi.xbee.devices import XBeeDevice, RemoteXBeeDevice, XBee64BitAddress\n'), ((11389, 11397), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (11394, 11397), False, 'from time import sleep\n'), ((1356, 1370), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1368, 1370), False, 'from datetime import datetime\n'), ((7583, 7600), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7598, 7600), False, 'from datetime import datetime\n'), ((7601, 7621), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (7609, 7621), False, 'from datetime import datetime\n'), ((6227, 6237), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (6232, 6237), False, 'from time import sleep\n'), ((6277, 6294), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6292, 6294), False, 'from datetime import datetime\n'), ((6295, 6315), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (6303, 6315), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python
import os
import sys
import argparse
from glob import glob
from textwrap import dedent
from collections import namedtuple
__version__ = 20200612
__doc__ = """Pack multiple small jobs into large queue jobs
* How it works
* The script merely generates a queue job script and a (mpi-aware) python script
* An outer mpirun in the queue job script places job launchers in the correct nodes
* An Inner mpirun in the job launchers run the application inside each node
* The "trick" here is simply to make the queue treat the inner mpi processes
as if they were openmp threads of the outer mpi processes
* How to use
* Run ./packjobs.py -h to see all the command line options
* Test run with e.g. 2 nodes, 12 procs per job, 2*24/12=4 simultaneous jobs, 1 hour:
./packjobs.py -i jobs_folder -r vasp_std -m VASP --nodes 2 --cpn 24 --ppj 12 --time 1
* Production run with e.g. 50 nodes, 4 procs per job, 50*24/4=300 simultaneous jobs, 24 hours:
./packjobs.py -i jobs_folder -r vasp_std -m VASP --nodes 50 --cpn 24 --ppj 4 --time 24
* Limitations
* If subfolders are added to the job folder after the launchers start running,
the new subfolders will not be considered, although this may change in the future
* However, this script can be run multiple times on the same job folder,
without duplications (the script tags each subfolder as "running" or "done")
* After a queue job is killed or expires, you may need to clean any "running" tags
with "--clean running"
"""
def parse_arguments():
"""Use argparse to get parameters from the command line"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-V', '--version', action='version', version='%%(prog)s %s' % __version__)
parser.add_argument('-i', '--input', dest='folder', type=str,
help="folder containing job folders (mandatory)", required=True)
parser.add_argument('-r', '--run', dest='job_cmd', type=str,
help="job command (e.g. vasp_std) (mandatory)", required=True)
parser.add_argument('-m', '--mod', dest='job_mod', type=str,
help="app module (e.g. VASP) (mandatory)", required=True)
parser.add_argument('-p', '--python-mod', dest='python_mod', type=str,
help="python module (e.g. Python)", default='Python')
parser.add_argument('-n', '--nodes', dest='nodes', type=int,
help="number of nodes (mandatory)", required=True)
parser.add_argument('-t', '--time', dest='hours', type=int, default=1,
help="number of hours for qjob (default: 1)")
parser.add_argument('-q', '--queue', dest='queue', type=str, default='normal',
help="name of batch queue for qjob (default: normal)")
parser.add_argument('-b', '--batch', dest='batch', type=str, default='pbs',
help="name of batch system for qjob, currently pbs or lsf (default: pbs)")
parser.add_argument('--cpn', '--cores-per-node', dest='cores_per_node', type=int, default=24,
help="number of cores per node (default: 24)")
parser.add_argument('--mpn', '--memory-per-node', dest='memory_per_node', type=int, default=96,
help="memory per node, in GB (default: 96)")
parser.add_argument('--ppj', '--procs-per-job', dest='procs_per_job', type=int, default=1,
help="number of mpi processes per job (default: 1)")
parser.add_argument('-d', '--dry-run', dest='dry', action='store_true', default=False,
help="don't submit, only create scripts (default: false)")
parser.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help="don't ask for confirmation when deleting files (default: false)")
parser.add_argument('-c', '--clean', action='append', default=[],
choices=['done', 'running', 'scripts', 'all'],
help='delete previously generated file (default: false)')
args = parser.parse_args()
if 'all' in args.clean:
args.clean.append('done')
args.clean.append('running')
args.clean.append('scripts')
if not os.path.isdir(args.folder):
print("\n Folder %s does not exist, exiting" % args.folder)
sys.exit(1)
if args.cores_per_node % args.procs_per_job != 0:
print("\n cores_per_node must be divisible by procs_per_job")
sys.exit(1)
args.jobs_per_node = int(args.cores_per_node/args.procs_per_job)
print("\n Requesting %s nodes, %s cores per node, using %s processes per job" %
(args.nodes, args.cores_per_node, args.procs_per_job))
print("\n This means %s jobs per node, %s simultaneous jobs at any given time\n" %
(args.jobs_per_node, args.jobs_per_node*args.nodes))
return args
class PackJobs:
__doc__ = __doc__
def __init__(self, **kwargs):
"""Takes keywords and maps them explicitly to class attributes"""
self.nodes = kwargs.pop('nodes')
self.folder = kwargs.pop('folder')
self.job_cmd = kwargs.pop('job_cmd')
self.job_mod = kwargs.pop('job_mod')
self.python_mod = kwargs.pop('python_mod')
self.hours = kwargs.pop('hours', 1)
self.queue = kwargs.pop('queue', 'normal')
self.batch = kwargs.pop('batch', 'pbs')
self.cores_per_node = kwargs.pop('cores_per_node', 24)
self.memory_per_node = kwargs.pop('memory_per_node', 96)
self.procs_per_job = kwargs.pop('procs_per_job', 1)
self.jobs_per_node = kwargs.pop('jobs_per_node', int(self.cores_per_node/self.procs_per_job))
self.dry = kwargs.pop('dry', False)
self.force = kwargs.pop('force', False)
self.clean = kwargs.pop('clean', False)
if len(kwargs.keys()) > 0:
self.log("don't know what to do with remaining arguments %s" % str(kwargs))
if self.batch == 'lsf':
self.qjob_script_template = self.qjob_lsf_template
self.qjob_sub_cmd = 'bsub <'
self.qjob_stat_cmd = 'bjobs'
else:
self.qjob_script_template = self.qjob_pbs_template
self.qjob_sub_cmd = 'qsub'
self.qjob_stat_cmd = 'qstat'
self.mpirun_job = ''
self.qjob_script_path = ''
def run(self):
"""Run all steps (clean, read_jobs, write_scripts, submit_jobs)"""
self.clean_files()
self.read_jobs()
self.write_scripts()
self.submit_jobs()
def clean_files(self):
"""Clean previously generated files if requested applicable"""
if 'all' in self.clean:
self.log("Warning: Deleting all files (but not subfolders) in %s" % self.folder)
if self.confirm():
for f in glob(os.path.join(self.folder, '*')):
if os.path.isfile(f):
os.remove(f)
else:
if 'scripts' in self.clean:
self.log("Warning: Deleting any previously generated qjob and worker scripts")
if self.confirm():
for qjob_script in glob(os.path.join(self.folder, 'qjob.script')):
os.remove(qjob_script)
for worker_py in glob(os.path.join(self.folder, 'worker*.py')):
os.remove(worker_py)
def read_jobs(self):
"""Look for jobs in job folder"""
self.log("Reading from folder %s" % self.folder)
Job = namedtuple('Job', ['folder', 'running', 'done'])
all_jobs = sorted([Job(subfolder,
os.path.isfile(os.path.join(self.folder, subfolder, 'running')),
os.path.isfile(os.path.join(self.folder, subfolder, 'done')))
for subfolder in os.listdir(self.folder)
if os.path.isdir(os.path.join(self.folder, subfolder))])
running_jobs = [job.folder for job in all_jobs if job.running]
finished_jobs = [job.folder for job in all_jobs if job.done]
unstarted_jobs = [job.folder for job in all_jobs if not job.running and not job.done]
self.log("Found %s jobs, %s of them currently running, %s of them done" %
(len(all_jobs), len(running_jobs), len(finished_jobs)))
jobs = unstarted_jobs
if 'running' in self.clean:
self.log("Warning: Forcing execution of jobs tagged as running")
if self.confirm():
for job in running_jobs:
os.remove(os.path.join(self.folder, job, 'running'))
jobs.extend(running_jobs)
if 'done' in self.clean:
self.log("Warning: Forcing execution of jobs tagged as done")
if self.confirm():
for job in finished_jobs:
os.remove(os.path.join(self.folder, job, 'done'))
jobs.extend(finished_jobs)
if len(jobs) > 0:
self.log("Adding %s jobs" % len(jobs))
if len(jobs) < self.jobs_per_node*self.nodes:
print("WARNING: with these jobs and parameters, some cores will be idle")
else:
self.log("No jobs left to run, exiting. You may want to use clean done and/or clean running")
sys.exit(1)
def write_scripts(self):
"""Write queue job and launcher scripts according to given parameters"""
self.mpirun_job = "mpirun -host $(hostname) -np %s %s > out 2> error" % \
(self.procs_per_job, self.job_cmd)
var_dict = {
'folder': self.folder,
'job_cmd': self.mpirun_job,
'job_mod': self.job_mod,
'python_mod': self.python_mod,
'nnodes': self.nodes,
'cpn': self.cores_per_node,
'mpn': self.memory_per_node,
'sjpn': self.jobs_per_node,
'ppj': self.procs_per_job,
'hours': self.hours,
'queue': self.queue,
'njobs': self.jobs_per_node*self.nodes,
'nslots': int(self.nodes*self.cores_per_node),
}
existing_workers = glob(os.path.join(self.folder, 'worker*.py'))
worker = 'worker%s' % (len(existing_workers)+1)
worker_py = worker + '.py'
var_dict['worker'] = worker
var_dict['worker_py'] = worker_py
worker_py_path = os.path.join(self.folder, worker_py)
if not self.dry:
self.log("Writing %s" % worker_py_path)
f = open(worker_py_path, 'w')
f.write(dedent(self.worker_script_template % var_dict))
f.close()
os.system("chmod +x %s" % worker_py_path)
existing_qjobs = glob(os.path.join(self.folder, 'qjob*.script'))
self.qjob_script_path = os.path.join(self.folder, 'qjob%s.script' % (len(existing_qjobs) + 1))
if not self.dry:
self.log("Writing %s" % self.qjob_script_path)
f = open(self.qjob_script_path, 'w')
f.write(dedent(self.qjob_script_template % var_dict))
f.close()
def submit_jobs(self):
"""Submit queue job"""
if not self.dry:
self.log("Submitting %s" % self.qjob_script_path)
folder, script = os.path.split(self.qjob_script_path)
os.system("cd %s; %s %s" % (folder, self.qjob_sub_cmd, script))
sys.stdout.write("\n")
os.system(self.qjob_stat_cmd)
def log(self, msg):
"""Print formatted log message"""
output = " "
if self.dry:
output += "(dry run) "
output += msg
output += "\n\n"
sys.stdout.write(output)
def confirm(self, prompt=None, default_yes=True, abort_no=False):
"""Prompt for confirmation, optionally aborting execution"""
if self.dry:
return False
if self.force:
return True
if prompt is None:
prompt = 'Proceed?'
if default_yes:
prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
else:
prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
while True:
ask = getattr(__builtins__, 'raw_input', input)
ans = ask(prompt)
if not ans:
return default_yes
if ans not in ['y', 'Y', 'n', 'N']:
print('please enter y or n.')
continue
if ans in ('Y', 'y'):
return True
if ans in ('N', 'n'):
if abort_no:
sys.exit(1)
else:
return False
qjob_pbs_template = """\
#!/bin/bash
#PBS -N %(worker)s
#PBS -l select=%(nnodes)s:ncpus=%(cpn)s:mpiprocs=%(sjpn)s:ompthreads=%(ppj)s:mem=%(mpn)sGB
#PBS -l walltime=%(hours)s:00:00
#PBS -j oe
#PBS -q %(queue)s
cd $PBS_O_WORKDIR
module purge
module load %(python_mod)s %(job_mod)s
# this mpirun, combined with mpiprocs and ompthreads queue settings,
# starts job launchers in the correct nodes
mpirun -np %(njobs)s ./%(worker_py)s
"""
qjob_lsf_template = """\
#!/bin/bash
#BSUB -J %(worker)s
#BSUB -n %(nslots)s
#BSUB -q %(queue)s
#BSUB -R \"span[ptile=%(sjpn)s]\"
#BSUB -R \"rusage[mem=%(mpn)s000]\"
#BSUB -W %(hours)s:00
#BSUB -eo
#BSUB -x
module purge
module load %(python_mod)s %(job_mod)s
# this mpirun, combined with the span[ptile] queue setting,
# starts job launchers in the correct nodes
mpirun -np %(njobs)s ./%(worker_py)s
"""
worker_script_template = """\
#!/usr/bin/env python
import os
import sys
import glob
import argparse
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
jobs = sorted([d for d in os.listdir(os.getcwd()) if os.path.isdir(d)])
j = rank
name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
status = open('status.' + name + '.rank' + str(rank), 'w')
while j < len(jobs):
running = os.path.isfile(os.path.join(jobs[j], 'running'))
done = os.path.isfile(os.path.join(jobs[j], 'done'))
if not running and not done:
status.write("running " + jobs[j] + "\\n")
status.flush()
os.chdir(jobs[j])
open('running', 'w').close()
error = os.system("%(job_cmd)s")
if not error:
os.remove('running')
open('done', 'w').close()
status.write(jobs[j] + " done\\n")
status.flush()
else:
status.write(jobs[j] + " failed\\n")
status.flush()
os.chdir('..')
else:
status.write(jobs[j] + " skipped\\n")
status.flush()
j += size
status.write("finished\\n")
status.close()
"""
if __name__ == "__main__":
args_dict = vars(parse_arguments())
p = PackJobs(**args_dict)
p.run()
|
[
"sys.stdout.write",
"textwrap.dedent",
"os.remove",
"argparse.ArgumentParser",
"os.path.isdir",
"os.system",
"os.path.isfile",
"collections.namedtuple",
"os.path.split",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((1640, 1736), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (1663, 1736), False, 'import argparse\n'), ((4391, 4417), 'os.path.isdir', 'os.path.isdir', (['args.folder'], {}), '(args.folder)\n', (4404, 4417), False, 'import os\n'), ((4495, 4506), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4503, 4506), False, 'import sys\n'), ((4640, 4651), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4648, 4651), False, 'import sys\n'), ((7704, 7752), 'collections.namedtuple', 'namedtuple', (['"""Job"""', "['folder', 'running', 'done']"], {}), "('Job', ['folder', 'running', 'done'])\n", (7714, 7752), False, 'from collections import namedtuple\n'), ((10599, 10635), 'os.path.join', 'os.path.join', (['self.folder', 'worker_py'], {}), '(self.folder, worker_py)\n', (10611, 10635), False, 'import os\n'), ((11598, 11620), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (11614, 11620), False, 'import sys\n'), ((11629, 11658), 'os.system', 'os.system', (['self.qjob_stat_cmd'], {}), '(self.qjob_stat_cmd)\n', (11638, 11658), False, 'import os\n'), ((11859, 11883), 'sys.stdout.write', 'sys.stdout.write', (['output'], {}), '(output)\n', (11875, 11883), False, 'import sys\n'), ((9513, 9524), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9521, 9524), False, 'import sys\n'), ((10361, 10400), 'os.path.join', 'os.path.join', (['self.folder', '"""worker*.py"""'], {}), "(self.folder, 'worker*.py')\n", (10373, 10400), False, 'import os\n'), ((10858, 10899), 'os.system', 'os.system', (["('chmod +x %s' % worker_py_path)"], {}), "('chmod +x %s' % worker_py_path)\n", (10867, 10899), False, 'import os\n'), ((10931, 10972), 'os.path.join', 'os.path.join', (['self.folder', '"""qjob*.script"""'], {}), "(self.folder, 'qjob*.script')\n", (10943, 10972), False, 'import os\n'), ((11476, 11512), 'os.path.split', 'os.path.split', (['self.qjob_script_path'], {}), '(self.qjob_script_path)\n', (11489, 11512), False, 'import os\n'), ((11525, 11588), 'os.system', 'os.system', (["('cd %s; %s %s' % (folder, self.qjob_sub_cmd, script))"], {}), "('cd %s; %s %s' % (folder, self.qjob_sub_cmd, script))\n", (11534, 11588), False, 'import os\n'), ((10776, 10822), 'textwrap.dedent', 'dedent', (['(self.worker_script_template % var_dict)'], {}), '(self.worker_script_template % var_dict)\n', (10782, 10822), False, 'from textwrap import dedent\n'), ((11232, 11276), 'textwrap.dedent', 'dedent', (['(self.qjob_script_template % var_dict)'], {}), '(self.qjob_script_template % var_dict)\n', (11238, 11276), False, 'from textwrap import dedent\n'), ((7004, 7034), 'os.path.join', 'os.path.join', (['self.folder', '"""*"""'], {}), "(self.folder, '*')\n", (7016, 7034), False, 'import os\n'), ((7060, 7077), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (7074, 7077), False, 'import os\n'), ((8029, 8052), 'os.listdir', 'os.listdir', (['self.folder'], {}), '(self.folder)\n', (8039, 8052), False, 'import os\n'), ((12766, 12777), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12774, 12777), False, 'import sys\n'), ((7103, 7115), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (7112, 7115), False, 'import os\n'), ((7344, 7384), 'os.path.join', 'os.path.join', (['self.folder', '"""qjob.script"""'], {}), "(self.folder, 'qjob.script')\n", (7356, 7384), False, 'import os\n'), ((7411, 7433), 'os.remove', 'os.remove', (['qjob_script'], {}), '(qjob_script)\n', 
(7420, 7433), False, 'import os\n'), ((7476, 7515), 'os.path.join', 'os.path.join', (['self.folder', '"""worker*.py"""'], {}), "(self.folder, 'worker*.py')\n", (7488, 7515), False, 'import os\n'), ((7542, 7562), 'os.remove', 'os.remove', (['worker_py'], {}), '(worker_py)\n', (7551, 7562), False, 'import os\n'), ((7842, 7889), 'os.path.join', 'os.path.join', (['self.folder', 'subfolder', '"""running"""'], {}), "(self.folder, subfolder, 'running')\n", (7854, 7889), False, 'import os\n'), ((7938, 7982), 'os.path.join', 'os.path.join', (['self.folder', 'subfolder', '"""done"""'], {}), "(self.folder, subfolder, 'done')\n", (7950, 7982), False, 'import os\n'), ((8097, 8133), 'os.path.join', 'os.path.join', (['self.folder', 'subfolder'], {}), '(self.folder, subfolder)\n', (8109, 8133), False, 'import os\n'), ((8775, 8816), 'os.path.join', 'os.path.join', (['self.folder', 'job', '"""running"""'], {}), "(self.folder, job, 'running')\n", (8787, 8816), False, 'import os\n'), ((9071, 9109), 'os.path.join', 'os.path.join', (['self.folder', 'job', '"""done"""'], {}), "(self.folder, job, 'done')\n", (9083, 9109), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
'''
Custom theano class to query the search engine.
'''
import numpy as np
import theano
from theano import gof
from theano import tensor
import parameters as prm
import utils
import average_precision
import random
class Search(theano.Op):
__props__ = ()
def __init__(self,options):
self.options = options
self.options['reformulated_queries'] = {}
def make_node(self, x1, x2, x3, x4):
assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
x1 = tensor.as_tensor_variable(x1)
x2 = tensor.as_tensor_variable(x2)
x3 = tensor.as_tensor_variable(x3)
x4 = tensor.as_tensor_variable(x4)
out = [tensor.fmatrix().type(), tensor.itensor3().type(), tensor.imatrix().type(), tensor.fmatrix().type()]
return theano.Apply(self, [x1, x2, x3, x4], out)
def perform(self, node, inputs, output_storage):
q_m = inputs[0]
D_truth = inputs[1]
n_iter = int(inputs[2])
is_train = int(inputs[3])
#outputs
metrics = np.zeros((len(q_m), len(prm.metrics_map)), np.float32)
if is_train:
max_feedback_docs = prm.max_feedback_docs_train
else:
max_feedback_docs = prm.max_feedback_docs
D_i = -2 * np.ones((len(q_m), max_feedback_docs, prm.max_words_input), np.int32)
D_gt_m = np.zeros((len(q_m), prm.max_candidates), np.float32)
D_id = np.zeros((len(q_m), prm.max_candidates), np.int32)
# no need to retrieve extra terms in the last iteration
if n_iter == prm.n_iterations - 1:
extra_terms = False
else:
extra_terms = True
# allow the search engine to cache queries only in the first iteration.
if n_iter == 0:
save_cache = prm.use_cache
else:
save_cache = False
max_cand = prm.max_candidates
qs = []
for i, q_lst in enumerate(self.options['current_queries']):
q = []
for j, word in enumerate(q_lst):
if q_m[i,j] == 1:
q.append(str(word))
q = ' '.join(q)
if len(q) == 0:
q = 'dummy'
qs.append(q)
# only used to print the reformulated queries.
self.options['reformulated_queries'][n_iter] = qs
# always return one more candidate because one of them might be the input doc.
candss = self.options['engine'].get_candidates(qs, max_cand, prm.max_feedback_docs, save_cache, extra_terms)
for i, cands in enumerate(candss):
D_truth_dic = {}
for d_truth in D_truth[i]:
if d_truth > -1:
D_truth_dic[d_truth] = 0
D_id[i,:len(cands.keys())] = cands.keys()
j = 0
m = 0
cand_ids = []
selected_docs = np.arange(prm.max_feedback_docs)
if is_train:
selected_docs = np.random.choice(selected_docs, size=prm.max_feedback_docs_train, replace=False)
for k, (cand_id, (words_idx, words)) in enumerate(cands.items()):
cand_ids.append(cand_id)
# no need to add candidate words in the last iteration.
if n_iter < prm.n_iterations - 1:
# only add docs selected by sampling (if training).
if k in selected_docs:
words = words[:prm.max_terms_per_doc]
words_idx = words_idx[:prm.max_terms_per_doc]
D_i[i,m,:len(words_idx)] = words_idx
# append empty strings, so the list size becomes <dim>.
words = words + max(0, prm.max_words_input - len(words)) * ['']
# append new words to the list of current queries.
self.options['current_queries'][i] += words
m += 1
if cand_id in D_truth_dic:
D_gt_m[i,j] = 1.
j += 1
cands_set = set(cands.keys())
if qs[i].lower() in self.options['engine'].title_id_map:
input_doc_id = self.options['engine'].title_id_map[qs[i].lower()]
# Remove input doc from returned docs.
# This operation does not raise an error if the element is not there.
cands_set.discard(input_doc_id)
intersec = len(set(D_truth_dic.keys()) & cands_set)
recall = intersec / max(1., float(len(D_truth_dic)))
precision = intersec / max(1., float(prm.max_candidates))
metrics[i,prm.metrics_map['RECALL']] = recall
metrics[i,prm.metrics_map['PRECISION']] = precision
metrics[i,prm.metrics_map['F1']] = 2 * recall * precision / max(0.01, recall + precision)
avg_precision = average_precision.compute(D_truth_dic.keys(), cand_ids)
metrics[i,prm.metrics_map['MAP']] = avg_precision
metrics[i,prm.metrics_map['LOG-GMAP']] = np.log(avg_precision + 1e-5)
output_storage[0][0] = metrics
output_storage[1][0] = D_i
output_storage[2][0] = D_id
output_storage[3][0] = D_gt_m
def grad(self, inputs, output_grads):
return [tensor.zeros_like(ii, dtype=theano.config.floatX) for ii in inputs]
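# Hypothetical symbolic wiring of this Op (the variable names are illustrative, not from
# the original code):
#   search = Search(options)
#   metrics, D_i, D_id, D_gt_m = search(q_m, D_truth, n_iter, is_train)
# where q_m is a 0/1 mask over options['current_queries'] selecting the words that form
# each reformulated query, and D_truth holds the ground-truth candidate ids padded with -1.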
|
[
"theano.tensor.as_tensor_variable",
"numpy.log",
"theano.tensor.itensor3",
"theano.Apply",
"theano.tensor.imatrix",
"theano.tensor.zeros_like",
"numpy.arange",
"theano.tensor.fmatrix",
"numpy.random.choice"
] |
[((554, 583), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x1'], {}), '(x1)\n', (579, 583), False, 'from theano import tensor\n'), ((597, 626), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x2'], {}), '(x2)\n', (622, 626), False, 'from theano import tensor\n'), ((640, 669), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x3'], {}), '(x3)\n', (665, 669), False, 'from theano import tensor\n'), ((683, 712), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x4'], {}), '(x4)\n', (708, 712), False, 'from theano import tensor\n'), ((845, 886), 'theano.Apply', 'theano.Apply', (['self', '[x1, x2, x3, x4]', 'out'], {}), '(self, [x1, x2, x3, x4], out)\n', (857, 886), False, 'import theano\n'), ((2934, 2966), 'numpy.arange', 'np.arange', (['prm.max_feedback_docs'], {}), '(prm.max_feedback_docs)\n', (2943, 2966), True, 'import numpy as np\n'), ((5171, 5200), 'numpy.log', 'np.log', (['(avg_precision + 1e-05)'], {}), '(avg_precision + 1e-05)\n', (5177, 5200), True, 'import numpy as np\n'), ((5408, 5457), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['ii'], {'dtype': 'theano.config.floatX'}), '(ii, dtype=theano.config.floatX)\n', (5425, 5457), False, 'from theano import tensor\n'), ((3025, 3110), 'numpy.random.choice', 'np.random.choice', (['selected_docs'], {'size': 'prm.max_feedback_docs_train', 'replace': '(False)'}), '(selected_docs, size=prm.max_feedback_docs_train, replace=False\n )\n', (3041, 3110), True, 'import numpy as np\n'), ((728, 744), 'theano.tensor.fmatrix', 'tensor.fmatrix', ([], {}), '()\n', (742, 744), False, 'from theano import tensor\n'), ((753, 770), 'theano.tensor.itensor3', 'tensor.itensor3', ([], {}), '()\n', (768, 770), False, 'from theano import tensor\n'), ((779, 795), 'theano.tensor.imatrix', 'tensor.imatrix', ([], {}), '()\n', (793, 795), False, 'from theano import tensor\n'), ((804, 820), 'theano.tensor.fmatrix', 'tensor.fmatrix', ([], {}), '()\n', (818, 820), False, 'from theano import tensor\n')]
|
#!/usr/bin/env python
"""
Fetch profile changes from nightscout and display their contents
"""
# Make it work on both python 2 and 3
# Probably a bit wide, but I'm still learning
from __future__ import absolute_import, with_statement, print_function, unicode_literals
# Built-in modules
import argparse
from datetime import datetime
import json
import logging
# External modules
import requests
from texttable import Texttable
logging.basicConfig(level=logging.INFO)
TIMED_ENTRIES = ['carbratio', 'sens', 'basal', 'target_low', 'target_high']
def normalize(profile, entry):
"""
Set entry to blank if it doesn't exist, thus avoiding KeyError
"""
try:
if profile[entry]:
pass
except KeyError:
profile[entry] = ''
def normalize_entry(entry):
"""
Clean up an entry before further processing
"""
logging.debug("Normalizing entry: %s", entry)
try:
if entry["timeAsSeconds"]:
pass
except KeyError:
entry_timeasseconds = datetime.strptime(entry["time"], "%H:%M")
entry[
"timeAsSeconds"] = 3600 * entry_timeasseconds.hour + 60 * entry_timeasseconds.minute
try:
if entry["time"]:
pass
except KeyError:
entry_hour = int(entry['timeAsSeconds'] / 3600)
        entry_minute = int(entry['timeAsSeconds'] % 3600 // 60)
entry["time"] = str(entry_hour).rjust(
2, '0') + ":" + str(entry_minute).rjust(2, '0')
entry["start"] = entry["time"] + ":00"
entry["minutes"] = int(entry["timeAsSeconds"]) / 60
def get_profile_switches(nightscout, token, date_from, count):
"""
Get list of profile switch events
"""
p_url = (
nightscout +
"/api/v1/treatments.json?find[eventType][$eq]=Profile%20Switch&count="
+ count + "&find[created_at][$gte]=" + date_from)
if token is not None:
p_url = p_url + "&token=" + token
p_switch = requests.get(p_url).json()
logging.debug("Profiles: %s", p_switch)
for profile in p_switch:
print("Profile named {} enabled at {} for duration {}".format(
profile['profile'], profile['created_at'], profile['duration']))
extracted_profile = json.loads(profile['profileJson'])
extracted_profile['name'] = profile['profile']
for key in ['timezone', 'delay', 'startDate']:
normalize(extracted_profile, key)
for entry_type in TIMED_ENTRIES:
for entry in extracted_profile[entry_type]:
normalize_entry(entry)
display_text(extracted_profile)
def display_text(p_data):
"""
Display profile in text format
"""
# p_data = profile_data[0]["store"][profile_name]
logging.debug("Data keys: %s", p_data.keys())
# Single value data
singletons = Texttable()
singletons.set_deco(Texttable.HEADER)
singletons.set_cols_align(["c", "c", "c", "c", "c", "c"])
singletons.add_rows([
["Profile name", "Timezone", "Units", "DIA", "Delay", "Start date"],
[
p_data["name"],
p_data["timezone"],
p_data["units"],
p_data["dia"],
p_data["delay"],
p_data["startDate"],
],
])
print(singletons.draw() + "\n")
times = {}
tgt_low = {v["time"]: v["value"] for v in p_data["target_low"]}
tgt_high = {v["time"]: v["value"] for v in p_data["target_high"]}
carb_ratio = {v["time"]: v["value"] for v in p_data["carbratio"]}
sens = {v["time"]: v["value"] for v in p_data["sens"]}
basal = {v["time"]: v["value"] for v in p_data["basal"]}
    logging.debug("%s %s %s %s %s", tgt_high, tgt_low, carb_ratio, sens, basal)
for (time, basal) in basal.items():
times.setdefault(time, {})
times[time]["basal"] = basal
for (time, sens) in sens.items():
times.setdefault(time, {})
times[time]["sens"] = sens
for (time, c_r) in carb_ratio.items():
times.setdefault(time, {})
times[time]["carbratio"] = c_r
for (time, tgt_h) in tgt_high.items():
times.setdefault(time, {})
times[time]["tgt_high"] = tgt_h
for (time, tgt_l) in tgt_low.items():
times.setdefault(time, {})
times[time]["tgt_low"] = tgt_l
logging.debug("Times: %s", times)
times_list = [["Time", "Basal", "ISF", "CR", "Target Low", "Target High"]]
for time in sorted(times.keys()):
times_list.append([
time,
times[time].get("basal", ""),
times[time].get("sens", ""),
times[time].get("carbratio", ""),
times[time].get("tgt_low", ""),
times[time].get("tgt_high", ""),
])
times_table = Texttable()
times_table.set_cols_align(["c", "c", "c", "c", "c", "c"])
times_table.add_rows(times_list)
print(times_table.draw() + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get nightscout profile.")
parser.add_argument(
"--nightscout",
help="Nightscout URL",
required=True,
nargs="?",
const="http://127.0.0.1:1337",
default="http://127.0.0.1:1337",
)
parser.add_argument("--token", help="Authenticaton token")
parser.add_argument("--from",
help="Starting date to look for profile change events",
dest="date_from")
parser.add_argument("--count", help="Number of profiles to display")
logging.debug(vars(parser.parse_args()))
# https://stackoverflow.com/questions/4575747/get-selected-subcommand-with-argparse/44948406#44948406
# I have no idea what it does, but it seems to do the trick
kwargs = vars(parser.parse_args())
get_profile_switches(**kwargs)
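# Example invocation (the script name, URL and token below are hypothetical):
#   python get_nightscout_profiles.py --nightscout https://nightscout.example.com \
#       --token mysecret --from 2020-01-01 --count 5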
|
[
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"json.loads",
"datetime.datetime.strptime",
"requests.get",
"texttable.Texttable"
] |
[((431, 470), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (450, 470), False, 'import logging\n'), ((864, 909), 'logging.debug', 'logging.debug', (['"""Normalizing entry: %s"""', 'entry'], {}), "('Normalizing entry: %s', entry)\n", (877, 909), False, 'import logging\n'), ((1972, 2011), 'logging.debug', 'logging.debug', (['"""Profiles: %s"""', 'p_switch'], {}), "('Profiles: %s', p_switch)\n", (1985, 2011), False, 'import logging\n'), ((2809, 2820), 'texttable.Texttable', 'Texttable', ([], {}), '()\n', (2818, 2820), False, 'from texttable import Texttable\n'), ((3618, 3675), 'logging.debug', 'logging.debug', (['tgt_high', 'tgt_low', 'carb_ratio', 'sens', 'basal'], {}), '(tgt_high, tgt_low, carb_ratio, sens, basal)\n', (3631, 3675), False, 'import logging\n'), ((4251, 4284), 'logging.debug', 'logging.debug', (['"""Times: %s"""', 'times'], {}), "('Times: %s', times)\n", (4264, 4284), False, 'import logging\n'), ((4696, 4707), 'texttable.Texttable', 'Texttable', ([], {}), '()\n', (4705, 4707), False, 'from texttable import Texttable\n'), ((4887, 4949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get nightscout profile."""'}), "(description='Get nightscout profile.')\n", (4910, 4949), False, 'import argparse\n'), ((2217, 2251), 'json.loads', 'json.loads', (["profile['profileJson']"], {}), "(profile['profileJson'])\n", (2227, 2251), False, 'import json\n'), ((1022, 1063), 'datetime.datetime.strptime', 'datetime.strptime', (["entry['time']", '"""%H:%M"""'], {}), "(entry['time'], '%H:%M')\n", (1039, 1063), False, 'from datetime import datetime\n'), ((1941, 1960), 'requests.get', 'requests.get', (['p_url'], {}), '(p_url)\n', (1953, 1960), False, 'import requests\n')]
|
import hashlib
import logging
import os
import shutil
import sys
from functools import partial
LOGGER = logging.getLogger()
_MISSING_FILE_PATH_MSG = "Missing parameter file_path: %s"
def generate_sha256_checksum(file_path):
'''
Purpose: Calculate the SHA256 checksum of a given file
Parameters: Path to file
Returns: The SHA256 checksum or None if something went wrong during calculation
'''
if not file_path \
or not os.path.isfile(file_path):
LOGGER.debug(_MISSING_FILE_PATH_MSG, str(file_path))
return None
sha_hash = hashlib.sha256()
buffer = 4096
with open(file_path, 'rb') as sha_file:
while True:
data = sha_file.read(buffer)
if not data:
break
sha_hash.update(data)
return sha_hash.hexdigest()
def generate_md5_checksum(file_path):
'''
Purpose: Will calculate a file's checksum (md5)
Returns: either False or the MD5 checksum of the file
Parameters: path to file
'''
if not file_path:
LOGGER.debug(_MISSING_FILE_PATH_MSG, str(file_path))
return None
checksum_readable = ""
if os.path.isfile(file_path):
with open(file_path, mode='rb') as checksum_file:
checksum = hashlib.md5()
for buf in iter(partial(checksum_file.read, 128), b''):
checksum.update(buf)
checksum_readable = checksum.hexdigest()
else:
return None
return checksum_readable
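# Minimal usage sketch (the path is hypothetical):
#   sha256 = generate_sha256_checksum("/tmp/example.bin")
#   md5sum = generate_md5_checksum("/tmp/example.bin")
# Both helpers return None when the path is missing or not a regular file.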
|
[
"hashlib.sha256",
"os.path.isfile",
"hashlib.md5",
"logging.getLogger"
] |
[((75, 94), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (92, 94), False, 'import logging\n'), ((549, 565), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (563, 565), False, 'import hashlib\n'), ((1143, 1168), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1157, 1168), False, 'import os\n'), ((425, 450), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (439, 450), False, 'import os\n'), ((1251, 1264), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (1262, 1264), False, 'import hashlib\n')]
|
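A short usage sketch for the checksum helpers above; the module name "checksums" is an assumption made for the import, not part of the original source.

import tempfile
from checksums import generate_sha256_checksum, generate_md5_checksum  # assumed module name

with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as tmp:
    tmp.write(b"hello world")
    path = tmp.name

print(generate_sha256_checksum(path))           # 64 hex characters (SHA256)
print(generate_md5_checksum(path))              # 32 hex characters (MD5)
print(generate_sha256_checksum("does-not-exist"))  # None when the path is not a file
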
# -*- coding: utf-8 -*-
import json
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Tuple, Dict, Set, Callable
from warnings import warn
from pandas import DataFrame, merge, Series, concat
from .exceptions import FileLoaderError, ValidationError, ConfigurationError
from .io import first_int_in_filename_key, FileLoader, CSVLoader
from .validators import DataFrameValidator
logger = logging.getLogger(__name__)
class BaseEvaluation(ABC):
def __init__(
self,
*,
ground_truth_path: Path = Path("/usr/src/evaluation/ground-truth/"),
predictions_path: Path = Path("/input/"),
file_sorter_key: Callable = first_int_in_filename_key,
file_loader: FileLoader,
validators: Tuple[DataFrameValidator, ...],
join_key: str = None,
aggregates: Set[str] = {
"mean",
"std",
"min",
"max",
"25%",
"50%",
"75%",
"count",
"uniq",
"freq",
},
output_file: Path = Path("/output/metrics.json"),
):
self._ground_truth_path = ground_truth_path
self._predictions_path = predictions_path
self._file_sorter_key = file_sorter_key
self._file_loader = file_loader
self._validators = validators
self._join_key = join_key
self._aggregates = aggregates
self._output_file = output_file
self._ground_truth_cases = DataFrame()
self._predictions_cases = DataFrame()
self._cases = DataFrame()
self._case_results = DataFrame()
self._aggregate_results = {}
super().__init__()
if isinstance(self._file_loader, CSVLoader) and self._join_key is None:
raise ConfigurationError(
f"You must set a `join_key` when using {self._file_loader}."
)
@property
def _metrics(self):
return {
"case": self._case_results.to_dict(),
"aggregates": self._aggregate_results,
}
def evaluate(self):
self.load()
self.validate()
self.merge_ground_truth_and_predictions()
self.cross_validate()
self.score()
self.save()
def load(self):
self._ground_truth_cases = self._load_cases(
folder=self._ground_truth_path
)
self._predictions_cases = self._load_cases(
folder=self._predictions_path
)
def _load_cases(self, *, folder: Path) -> DataFrame:
cases = None
for f in sorted(folder.glob("**/*"), key=self._file_sorter_key):
try:
new_cases = self._file_loader.load(fname=f)
except FileLoaderError:
logger.warning(
f"Could not load {f.name} using {self._file_loader}."
)
else:
if cases is None:
cases = [new_cases]
else:
cases.append(new_cases)
if cases is None:
raise FileLoaderError(
f"Could not load and files in {folder} with "
f"{self._file_loader}."
)
return DataFrame(cases)
def validate(self):
self._validate_data_frame(df=self._ground_truth_cases)
self._validate_data_frame(df=self._predictions_cases)
def _validate_data_frame(self, *, df: DataFrame):
for validator in self._validators:
validator.validate(df=df)
@abstractmethod
def merge_ground_truth_and_predictions(self):
pass
@abstractmethod
def cross_validate(self):
pass
def _raise_missing_predictions_error(self, *, missing=None):
if missing is not None:
message = (
"Predictions missing: you did not submit predictions for "
f"{missing}. Please try again."
)
else:
message = (
"Predictions missing: you did not submit enough predictions, "
"please try again."
)
raise ValidationError(message)
def _raise_extra_predictions_error(self, *, extra=None):
if extra is not None:
message = (
"Too many predictions: we do not have the ground truth data "
f"for {extra}. Please try again."
)
else:
message = (
"Too many predictions: you submitted too many predictions, "
"please try again."
)
raise ValidationError(message)
@abstractmethod
def score(self):
pass
# noinspection PyUnusedLocal
@staticmethod
def score_case(*, idx: int, case: DataFrame) -> Dict:
return {}
def score_aggregates(self) -> Dict:
aggregate_results = {}
for col in self._case_results.columns:
aggregate_results[col] = self.aggregate_series(
series=self._case_results[col]
)
return aggregate_results
def aggregate_series(self, *, series: Series) -> Dict:
summary = series.describe()
valid_keys = [a for a in self._aggregates if a in summary]
series_summary = {}
for k in valid_keys:
value = summary[k]
# % in keys could cause problems when looking up values later
key = k.replace("%", "pc")
try:
json.dumps(value)
except TypeError:
logger.warning(
f"Could not serialize {key}: {value} as json, "
f"so converting {value} to int."
)
value = int(value)
series_summary[key] = value
return series_summary
def save(self):
self.write_metrics_json()
def write_metrics_json(self):
with open(self._output_file, "w") as f:
f.write(json.dumps(self._metrics))
class ClassificationEvaluation(BaseEvaluation):
"""
ClassificationEvaluations have the same number of predictions as the
number of ground truth cases. These can be things like, what is the
stage of this case, or segment some things in this case.
"""
def merge_ground_truth_and_predictions(self):
if self._join_key:
kwargs = {"on": self._join_key}
else:
kwargs = {"left_index": True, "right_index": True}
self._cases = merge(
left=self._ground_truth_cases,
right=self._predictions_cases,
indicator=True,
how="outer",
suffixes=("_ground_truth", "_prediction"),
**kwargs,
)
def cross_validate(self):
missing = [p for _, p in self._cases.iterrows() if
p["_merge"] == "left_only"]
if missing:
if self._join_key:
missing = [p[self._join_key] for p in missing]
self._raise_missing_predictions_error(missing=missing)
extra = [p for _, p in self._cases.iterrows() if
p["_merge"] == "right_only"]
if extra:
if self._join_key:
extra = [p[self._join_key] for p in extra]
self._raise_extra_predictions_error(extra=extra)
def score(self):
self._case_results = DataFrame()
for idx, case in self._cases.iterrows():
self._case_results = self._case_results.append(
self.score_case(idx=idx, case=case), ignore_index=True
)
self._aggregate_results = self.score_aggregates()
class Evaluation(ClassificationEvaluation):
"""
Legacy class, you should use ClassificationEvaluation instead.
"""
def __init__(self, *args, **kwargs):
warn(
(
"The Evaluation class is deprecated, "
"please use ClassificationEvaluation instead"
),
DeprecationWarning
)
super().__init__(*args, **kwargs)
class DetectionEvaluation(BaseEvaluation):
"""
DetectionEvaluations have a different number of predictions from the
number of ground truth annotations. An example would be detecting lung
nodules in a CT volume, or malignant cells in a pathology slide.
"""
def merge_ground_truth_and_predictions(self):
self._cases = concat(
[self._ground_truth_cases, self._predictions_cases],
keys=["ground_truth", "predictions"]
)
def cross_validate(self):
expected_keys = set(self._ground_truth_cases[self._join_key])
submitted_keys = set(self._predictions_cases[self._join_key])
missing = expected_keys - submitted_keys
if missing:
self._raise_missing_predictions_error(missing=missing)
extra = submitted_keys - expected_keys
if extra:
self._raise_extra_predictions_error(extra=extra)
def score(self):
cases = set(self._ground_truth_cases[self._join_key])
self._case_results = DataFrame()
for idx, case in enumerate(cases):
self._case_results = self._case_results.append(
self.score_case(
idx=idx,
case=self._cases.loc[self._cases[self._join_key] == case],
), ignore_index=True
)
self._aggregate_results = self.score_aggregates()
def score_aggregates(self):
aggregate_results = super().score_aggregates()
totals = self._case_results.sum()
for s in totals.index:
aggregate_results[s]["sum"] = totals[s]
tp = aggregate_results["true_positives"]["sum"]
fp = aggregate_results["false_positives"]["sum"]
fn = aggregate_results["false_negatives"]["sum"]
aggregate_results["precision"] = tp / (tp + fp)
aggregate_results["recall"] = tp / (tp + fn)
aggregate_results["f1_score"] = 2 * tp / ((2 * tp) + fp + fn)
return aggregate_results
|
[
"pandas.DataFrame",
"pandas.merge",
"json.dumps",
"pathlib.Path",
"warnings.warn",
"pandas.concat",
"logging.getLogger"
] |
[((433, 460), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (450, 460), False, 'import logging\n'), ((567, 608), 'pathlib.Path', 'Path', (['"""/usr/src/evaluation/ground-truth/"""'], {}), "('/usr/src/evaluation/ground-truth/')\n", (571, 608), False, 'from pathlib import Path\n'), ((643, 658), 'pathlib.Path', 'Path', (['"""/input/"""'], {}), "('/input/')\n", (647, 658), False, 'from pathlib import Path\n'), ((1105, 1133), 'pathlib.Path', 'Path', (['"""/output/metrics.json"""'], {}), "('/output/metrics.json')\n", (1109, 1133), False, 'from pathlib import Path\n'), ((1518, 1529), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1527, 1529), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((1564, 1575), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1573, 1575), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((1599, 1610), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1608, 1610), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((1641, 1652), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1650, 1652), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((3257, 3273), 'pandas.DataFrame', 'DataFrame', (['cases'], {}), '(cases)\n', (3266, 3273), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((6501, 6658), 'pandas.merge', 'merge', ([], {'left': 'self._ground_truth_cases', 'right': 'self._predictions_cases', 'indicator': '(True)', 'how': '"""outer"""', 'suffixes': "('_ground_truth', '_prediction')"}), "(left=self._ground_truth_cases, right=self._predictions_cases,\n indicator=True, how='outer', suffixes=('_ground_truth', '_prediction'),\n **kwargs)\n", (6506, 6658), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((7378, 7389), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (7387, 7389), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((7821, 7938), 'warnings.warn', 'warn', (['"""The Evaluation class is deprecated, please use ClassificationEvaluation instead"""', 'DeprecationWarning'], {}), "(\n 'The Evaluation class is deprecated, please use ClassificationEvaluation instead'\n , DeprecationWarning)\n", (7825, 7938), False, 'from warnings import warn\n'), ((8407, 8509), 'pandas.concat', 'concat', (['[self._ground_truth_cases, self._predictions_cases]'], {'keys': "['ground_truth', 'predictions']"}), "([self._ground_truth_cases, self._predictions_cases], keys=[\n 'ground_truth', 'predictions'])\n", (8413, 8509), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((9088, 9099), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (9097, 9099), False, 'from pandas import DataFrame, merge, Series, concat\n'), ((5496, 5513), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (5506, 5513), False, 'import json\n'), ((5980, 6005), 'json.dumps', 'json.dumps', (['self._metrics'], {}), '(self._metrics)\n', (5990, 6005), False, 'import json\n')]
|
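A hedged sketch of how the framework above is typically subclassed: for a classification task only score_case needs to be provided. The package name, paths, and column names below are assumptions for illustration, not taken from the original code.

from pathlib import Path
from evaluation import ClassificationEvaluation, CSVLoader  # hypothetical package name

class AccuracyEvaluation(ClassificationEvaluation):
    def __init__(self):
        super().__init__(
            file_loader=CSVLoader(),
            validators=(),                        # no DataFrameValidators in this sketch
            join_key="case",                      # required when a CSVLoader is used
            ground_truth_path=Path("ground-truth/"),
            predictions_path=Path("predictions/"),
            output_file=Path("metrics.json"),
        )

    def score_case(self, *, idx, case):
        # "label" columns are assumed; merge() suffixes them as shown below
        return {"correct": int(case["label_ground_truth"] == case["label_prediction"])}

if __name__ == "__main__":
    # evaluate() runs load -> validate -> merge -> cross_validate -> score -> save
    AccuracyEvaluation().evaluate()
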
import pygame
def load(manager, params):
return pygame.quit()
|
[
"pygame.quit"
] |
[((53, 66), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (64, 66), False, 'import pygame\n')]
|
import httptools
import unittest
from unittest import mock
RESPONSE1_HEAD = b'''HTTP/1.1 200 OK
Date: Mon, 23 May 2005 22:38:34 GMT
Server: Apache/1.3.3.7
(Unix) (Red-Hat/Linux)
Last-Modified: Wed, 08 Jan 2003 23:11:55 GMT
ETag: "3f80f-1b6-3e1cb03b"
Content-Type: text/html;
charset=UTF-8
Content-Length: 130
Accept-Ranges: bytes
Connection: close
'''
RESPONSE1_BODY = b'''
<html>
<head>
<title>An Example Page</title>
</head>
<body>
Hello World, this is a very simple HTML document.
</body>
</html>'''
CHUNKED_REQUEST1_1 = b'''POST /test.php?a=b+c HTTP/1.2
User-Agent: Fooo
Host: bar
Transfer-Encoding: chunked
5\r\nhello\r\n6\r\n world\r\n'''
CHUNKED_REQUEST1_2 = b'''0\r\nVary: *\r\nUser-Agent: spam\r\n\r\n'''
class TestResponseParser(unittest.TestCase):
def test_parser_response_1(self):
m = mock.Mock()
headers = {}
m.on_header.side_effect = headers.__setitem__
p = httptools.HttpResponseParser(m)
p.feed_data(RESPONSE1_HEAD)
self.assertEqual(p.get_http_version(), '1.1')
self.assertEqual(p.get_status_code(), 200)
m.on_status.assert_called_once_with(b'OK')
m.on_headers_complete.assert_called_once_with()
self.assertEqual(m.on_header.call_count, 8)
self.assertEqual(len(headers), 8)
self.assertEqual(headers.get(b'Connection'), b'close')
self.assertEqual(headers.get(b'Content-Type'),
b'text/html; charset=UTF-8')
self.assertFalse(m.on_body.called)
p.feed_data(bytearray(RESPONSE1_BODY))
m.on_body.assert_called_once_with(RESPONSE1_BODY)
m.on_message_complete.assert_called_once_with()
self.assertFalse(m.on_url.called)
self.assertFalse(m.on_chunk_header.called)
self.assertFalse(m.on_chunk_complete.called)
with self.assertRaisesRegex(
httptools.HttpParserError,
'data received after completed connection'):
p.feed_data(b'12123123')
def test_parser_response_2(self):
with self.assertRaisesRegex(TypeError, 'expected bytes'):
httptools.HttpResponseParser(None).feed_data('')
def test_parser_response_3(self):
callbacks = {'on_header', 'on_headers_complete', 'on_body',
'on_message_complete'}
for cbname in callbacks:
with self.subTest('{} callback fails correctly'.format(cbname)):
with self.assertRaisesRegex(httptools.HttpParserCallbackError,
'callback failed'):
m = mock.Mock()
getattr(m, cbname).side_effect = Exception()
p = httptools.HttpResponseParser(m)
p.feed_data(RESPONSE1_HEAD + RESPONSE1_BODY)
def test_parser_response_4(self):
p = httptools.HttpResponseParser(None)
with self.assertRaises(httptools.HttpParserInvalidStatusError):
p.feed_data(b'HTTP/1.1 1299 FOOSPAM\r\n')
def test_parser_response_5(self):
m = mock.Mock()
m.on_status = None
m.on_header = None
m.on_body = None
m.on_headers_complete = None
m.on_chunk_header = None
m.on_chunk_complete = None
p = httptools.HttpResponseParser(m)
p.feed_data(RESPONSE1_HEAD)
p.feed_data(RESPONSE1_BODY)
m.on_message_complete.assert_called_once_with()
class TestRequestParser(unittest.TestCase):
def test_parser_request_chunked_1(self):
m = mock.Mock()
p = httptools.HttpRequestParser(m)
p.feed_data(CHUNKED_REQUEST1_1)
self.assertEqual(p.get_method(), b'POST')
m.on_url.assert_called_once_with(b'/test.php?a=b+c')
self.assertEqual(p.get_http_version(), '1.2')
m.on_header.assert_called_with(b'Transfer-Encoding', b'chunked')
m.on_chunk_header.assert_called_with()
m.on_chunk_complete.assert_called_with()
self.assertFalse(m.on_message_complete.called)
m.reset_mock()
p.feed_data(CHUNKED_REQUEST1_2)
m.on_chunk_header.assert_called_with()
m.on_chunk_complete.assert_called_with()
m.on_header.assert_called_with(b'User-Agent', b'spam')
self.assertEqual(m.on_header.call_count, 2)
m.on_message_complete.assert_called_once_with()
def test_parser_request_chunked_2(self):
m = mock.Mock()
headers = {}
m.on_header.side_effect = headers.__setitem__
m.on_url = None
m.on_body = None
m.on_headers_complete = None
m.on_chunk_header = None
m.on_chunk_complete = None
p = httptools.HttpRequestParser(m)
p.feed_data(CHUNKED_REQUEST1_1)
p.feed_data(CHUNKED_REQUEST1_2)
self.assertEqual(
headers,
{b'User-Agent': b'spam',
b'Transfer-Encoding': b'chunked',
b'Host': b'bar',
b'Vary': b'*'})
def test_parser_request_2(self):
p = httptools.HttpRequestParser(None)
with self.assertRaises(httptools.HttpParserInvalidMethodError):
p.feed_data(b'SPAM /test.php?a=b+c HTTP/1.2')
def test_parser_request_3(self):
p = httptools.HttpRequestParser(None)
with self.assertRaises(httptools.HttpParserInvalidURLError):
p.feed_data(b'POST HTTP/1.2')
class TestUrlParser(unittest.TestCase):
def parse(self, url:bytes):
parsed = httptools.parse_url(url)
return (parsed.schema, parsed.host, parsed.port, parsed.path,
parsed.query, parsed.fragment, parsed.userinfo)
def test_parser_url_1(self):
self.assertEqual(
self.parse(b'dsf://aaa/b/c?aa#123'),
(b'dsf', b'aaa', None, b'/b/c', b'aa', b'123', None))
self.assertEqual(
self.parse(b'dsf://i:n@aaa:88/b/c?aa#123'),
(b'dsf', b'aaa', 88, b'/b/c', b'aa', b'123', b'i:n'))
self.assertEqual(
self.parse(b'////'),
(None, None, None, b'////', None, None, None))
self.assertEqual(
self.parse(b'////1/1?a=b&c[]=d&c[]=z'),
(None, None, None, b'////1/1', b'a=b&c[]=d&c[]=z', None, None))
self.assertEqual(
self.parse(b'/////?#123'),
(None, None, None, b'/////', None, b'123', None))
self.assertEqual(
self.parse(b'/a/b/c?b=1&'),
(None, None, None, b'/a/b/c', b'b=1&', None, None))
def test_parser_url_2(self):
self.assertEqual(
self.parse(b''),
(None, None, None, None, None, None, None))
def test_parser_url_3(self):
with self.assertRaises(httptools.HttpParserInvalidURLError):
self.parse(b' ')
def test_parser_url_4(self):
with self.assertRaises(httptools.HttpParserInvalidURLError):
self.parse(b':///1')
def test_parser_url_5(self):
self.assertEqual(
self.parse(b'http://[fdf8:f53e:61e4::18:4]:67/'),
(b'http', b'fdf8:f53e:61e4::18:4', 67, b'/', None, None, None))
def test_parser_url_6(self):
self.assertEqual(
self.parse(bytearray(b'/')),
(None, None, None, b'/', None, None, None))
def test_parser_url_7(self):
url = httptools.parse_url(b'/')
with self.assertRaisesRegex(AttributeError, 'not writable'):
url.port = 0
def test_parser_url_8(self):
with self.assertRaises(TypeError):
httptools.parse_url(None)
|
[
"httptools.parse_url",
"unittest.mock.Mock",
"httptools.HttpRequestParser",
"httptools.HttpResponseParser"
] |
[((835, 846), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (844, 846), False, 'from unittest import mock\n'), ((936, 967), 'httptools.HttpResponseParser', 'httptools.HttpResponseParser', (['m'], {}), '(m)\n', (964, 967), False, 'import httptools\n'), ((2857, 2891), 'httptools.HttpResponseParser', 'httptools.HttpResponseParser', (['None'], {}), '(None)\n', (2885, 2891), False, 'import httptools\n'), ((3069, 3080), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3078, 3080), False, 'from unittest import mock\n'), ((3278, 3309), 'httptools.HttpResponseParser', 'httptools.HttpResponseParser', (['m'], {}), '(m)\n', (3306, 3309), False, 'import httptools\n'), ((3543, 3554), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3552, 3554), False, 'from unittest import mock\n'), ((3567, 3597), 'httptools.HttpRequestParser', 'httptools.HttpRequestParser', (['m'], {}), '(m)\n', (3594, 3597), False, 'import httptools\n'), ((4423, 4434), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (4432, 4434), False, 'from unittest import mock\n'), ((4679, 4709), 'httptools.HttpRequestParser', 'httptools.HttpRequestParser', (['m'], {}), '(m)\n', (4706, 4709), False, 'import httptools\n'), ((5031, 5064), 'httptools.HttpRequestParser', 'httptools.HttpRequestParser', (['None'], {}), '(None)\n', (5058, 5064), False, 'import httptools\n'), ((5245, 5278), 'httptools.HttpRequestParser', 'httptools.HttpRequestParser', (['None'], {}), '(None)\n', (5272, 5278), False, 'import httptools\n'), ((5483, 5507), 'httptools.parse_url', 'httptools.parse_url', (['url'], {}), '(url)\n', (5502, 5507), False, 'import httptools\n'), ((7315, 7340), 'httptools.parse_url', 'httptools.parse_url', (["b'/'"], {}), "(b'/')\n", (7334, 7340), False, 'import httptools\n'), ((7524, 7549), 'httptools.parse_url', 'httptools.parse_url', (['None'], {}), '(None)\n', (7543, 7549), False, 'import httptools\n'), ((2128, 2162), 'httptools.HttpResponseParser', 'httptools.HttpResponseParser', (['None'], {}), '(None)\n', (2156, 2162), False, 'import httptools\n'), ((2607, 2618), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2616, 2618), False, 'from unittest import mock\n'), ((2709, 2740), 'httptools.HttpResponseParser', 'httptools.HttpResponseParser', (['m'], {}), '(m)\n', (2737, 2740), False, 'import httptools\n')]
|
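The tests above drive httptools through mocks; the sketch below exercises the same callback protocol with a plain object. The Protocol class and the request bytes are illustrative only.

import httptools

class Protocol:
    def __init__(self):
        self.url = None
        self.headers = {}
        self.body = b""
        self.complete = False

    def on_url(self, url):
        self.url = url

    def on_header(self, name, value):
        self.headers[name] = value

    def on_body(self, body):
        self.body += body

    def on_message_complete(self):
        self.complete = True

proto = Protocol()
parser = httptools.HttpRequestParser(proto)
parser.feed_data(b"GET /index.html?x=1 HTTP/1.1\r\nHost: example.com\r\n\r\n")
print(parser.get_method())                      # b'GET'
print(proto.url, proto.headers, proto.complete)  # b'/index.html?x=1' {...} True
print(httptools.parse_url(proto.url).path)       # b'/index.html'
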