code
stringlengths 22–1.05M
| apis
listlengths 1–3.31k
| extract_api
stringlengths 75–3.25M
|
---|---|---|
# import the necessary packages
from pyimagesearch.nn.conv.lenet import LeNet
from tensorflow.keras.utils import plot_model
model = LeNet.build(28, 28, 3, 3)
plot_model(model, show_shapes=True, to_file="lenet.png")
|
[
"tensorflow.keras.utils.plot_model",
"pyimagesearch.nn.conv.lenet.LeNet.build"
] |
[((133, 158), 'pyimagesearch.nn.conv.lenet.LeNet.build', 'LeNet.build', (['(28)', '(28)', '(3)', '(3)'], {}), '(28, 28, 3, 3)\n', (144, 158), False, 'from pyimagesearch.nn.conv.lenet import LeNet\n'), ((159, 215), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'show_shapes': '(True)', 'to_file': '"""lenet.png"""'}), "(model, show_shapes=True, to_file='lenet.png')\n", (169, 215), False, 'from tensorflow.keras.utils import plot_model\n')]
|
import itertools
import pyscipopt as scip
import geco.mips.utilities.naming as naming
def naive(graph):
model = scip.Model("Naive MaxCut")
node_variables = {}
for v in graph.nodes():
node_variables[v] = model.addVar(lb=0, ub=1, obj=0, name=str(v), vtype="B")
edge_variables = {}
all_non_negative = True
for u, v, d in graph.edges(data=True):
edge_name = naming.undirected_edge_name(u, v)
weight = d["weight"]
edge_variables[edge_name] = model.addVar(
lb=0, ub=1, obj=weight, name=edge_name, vtype="B"
)
if weight < 0:
all_non_negative = False
model.setMaximize()
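    # Cut-edge linearization: the first two constraints force e_uv = 0 when u
    # and v end up on the same side; the reverse-direction constraints (needed
    # only when some weight is negative) force e_uv = 1 when the endpoints are
    # split, which maximization alone would not guarantee for negative weights.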
for u, v, d in graph.edges(data=True):
edge_name = naming.undirected_edge_name(u, v)
model.addCons(
node_variables[u] + node_variables[v] + edge_variables[edge_name] <= 2
)
model.addCons(
-node_variables[u] - node_variables[v] + edge_variables[edge_name] <= 0
)
if not all_non_negative:
model.addCons(
node_variables[u] - node_variables[v] - edge_variables[edge_name] <= 0
)
model.addCons(
-node_variables[u] + node_variables[v] - edge_variables[edge_name] <= 0
)
return (node_variables, edge_variables), model
def triangle(graph):
model = scip.Model("Triangle MaxCut")
edge_variables = {}
for u, v in itertools.combinations(graph.nodes(), 2):
edge_name = naming.undirected_edge_name(u, v)
if graph.has_edge(u, v):
weight = graph.get_edge_data(u, v)["weight"]
else:
weight = 0
edge_variables[edge_name] = model.addVar(
lb=0, ub=1, obj=weight, name=edge_name, vtype="B"
)
model.setMaximize()
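    # Triangle inequalities of the cut polytope over every node triple: a pair
    # can be cut only if at least one of the other two pairs is cut, and at
    # most two of the three pairs can be cut.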
for i, j, k in itertools.combinations(graph.nodes(), 3):
x_ij = _get_edge_variable(i, j, edge_variables)
x_ik = _get_edge_variable(i, k, edge_variables)
x_kj = _get_edge_variable(k, j, edge_variables)
model.addCons(x_ij <= x_ik + x_kj)
model.addCons(x_ij + x_ik + x_kj <= 2)
return edge_variables, model
def _get_edge_variable(u, v, edge_variables):
edge_name = naming.undirected_edge_name(u, v)
return edge_variables[edge_name]
|
[
"geco.mips.utilities.naming.undirected_edge_name",
"pyscipopt.Model"
] |
[((119, 145), 'pyscipopt.Model', 'scip.Model', (['"""Naive MaxCut"""'], {}), "('Naive MaxCut')\n", (129, 145), True, 'import pyscipopt as scip\n'), ((1377, 1406), 'pyscipopt.Model', 'scip.Model', (['"""Triangle MaxCut"""'], {}), "('Triangle MaxCut')\n", (1387, 1406), True, 'import pyscipopt as scip\n'), ((2237, 2270), 'geco.mips.utilities.naming.undirected_edge_name', 'naming.undirected_edge_name', (['u', 'v'], {}), '(u, v)\n', (2264, 2270), True, 'import geco.mips.utilities.naming as naming\n'), ((399, 432), 'geco.mips.utilities.naming.undirected_edge_name', 'naming.undirected_edge_name', (['u', 'v'], {}), '(u, v)\n', (426, 432), True, 'import geco.mips.utilities.naming as naming\n'), ((733, 766), 'geco.mips.utilities.naming.undirected_edge_name', 'naming.undirected_edge_name', (['u', 'v'], {}), '(u, v)\n', (760, 766), True, 'import geco.mips.utilities.naming as naming\n'), ((1511, 1544), 'geco.mips.utilities.naming.undirected_edge_name', 'naming.undirected_edge_name', (['u', 'v'], {}), '(u, v)\n', (1538, 1544), True, 'import geco.mips.utilities.naming as naming\n')]
|
import time
import socket
import sys
from board import Board
INF = 1.0e100
CORNERS = [(0, 0), (0, 7), (7, 0), (7, 7)]
CENTERS = [(3, 3), (3, 4), (4, 3), (4, 4)]
DANGERS = [(0, 1), (0, 6), (1, 0), (1, 1), (1, 6), (1, 7), (6, 0), (6, 1),
(6, 6), (6, 7), (7, 1), (7, 6)]
G_EDGES = [(0, 2), (0, 3), (0, 4), (0, 5), (2, 0), (3, 0), (4, 0), (5, 0),
(2, 7), (3, 7), (4, 7), (5, 7), (7, 2), (7, 3), (7, 4), (7, 5)]
NEIGHBORS = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0),
(1, 1)]
class Player(object):
def __init__(self, me, you):
self.me, self.you = me, you
self.round = 0
# handling the board
self.board = Board()
self.centers_bits = sum(self.board.spaces[i] for i in CENTERS)
self.corners_bits = sum(self.board.spaces[i] for i in CORNERS)
self.mine = 0
self.foe = 0
def get_valid_moves(self, state, player=None):
"""
state is: (p1_placed, p2_placed, whose_turn)
"""
if player is None:
player = state[2]
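        # During the first four rounds only the still-free centre squares are
        # playable; afterwards normal legal-move generation applies.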
if self.round < 4:
            centers_remaining_bits = self.centers_bits - state[0] - state[1]
            return self.board.bits_to_tuples(centers_remaining_bits)
if player == 1:
return self.board.legal_actions(state[0], state[1])
else:
return self.board.legal_actions(state[1], state[0])
def play_game(self, hostname):
self.load_tree()
def init_client(hostname):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (hostname, 3333 + self.me)
            print('starting up on %s port %s' % server_address, file=sys.stderr)
sock.connect(server_address)
for ind, thing in enumerate(sock.recv(1024).decode().split("\n")):
print("when init got {} and {}".format(ind, thing))
return sock
def read_message(sock):
message = sock.recv(1024).decode().split("\n")
turn = int(message[0])
if (turn == -999):
time.sleep(1)
self.save_tree()
sys.exit()
self.round = int(message[1])
self.t1 = float(message[2])
self.t2 = float(message[3])
print("turn", turn)
print("current time:", time.time())
print("round:", self.round)
print("t1:", self.t1)
print("t2:", self.t2)
count = 4
self.mine = 0
self.foe = 0
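            # message[4:] holds the 8x8 board colours in row-major order;
            # rebuild both players' bitboards from it.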
for i in range(8):
for j in range(8):
color = int(message[count])
if color == self.me:
self.mine += self.board.spaces[(i, j)]
elif color == self.you:
self.foe += self.board.spaces[(i, j)]
count += 1
# update board
if self.me == 1:
self.board = Board(self.mine, self.foe)
else:
self.board = Board(self.foe, self.mine)
return turn
        # connect to the game server
sock = init_client(hostname)
while True:
turn = read_message(sock)
if turn == self.me:
print("============")
print("Round: ", self.round)
# print("Valid moves: ", valid_moves)
print("mine: ", self.mine)
print("FOE: ", self.foe)
print(self.board)
my_move = self.move(self.pack_state(turn))
print("My move: ", my_move)
msg = "{}\n{}\n".format(my_move[0], my_move[1])
sock.send(msg.encode())
def other_player(self, a_player):
if a_player == self.me:
return self.you
else:
return self.me
def pack_state(self, turn):
if self.me == 1:
return self.mine, self.foe, turn
else:
return self.foe, self.mine, turn
def save_tree(self):
pass
def load_tree(self):
pass
|
[
"socket.socket",
"time.time",
"time.sleep",
"board.Board",
"sys.exit"
] |
[((695, 702), 'board.Board', 'Board', ([], {}), '()\n', (700, 702), False, 'from board import Board\n'), ((1529, 1578), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1542, 1578), False, 'import socket\n'), ((2098, 2111), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2108, 2111), False, 'import time\n'), ((2161, 2171), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2169, 2171), False, 'import sys\n'), ((2360, 2371), 'time.time', 'time.time', ([], {}), '()\n', (2369, 2371), False, 'import time\n'), ((2995, 3021), 'board.Board', 'Board', (['self.mine', 'self.foe'], {}), '(self.mine, self.foe)\n', (3000, 3021), False, 'from board import Board\n'), ((3069, 3095), 'board.Board', 'Board', (['self.foe', 'self.mine'], {}), '(self.foe, self.mine)\n', (3074, 3095), False, 'from board import Board\n')]
|
# Form validation application in JavaScript
#
# Copyright 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions: The above copyright
# notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/', methods=["POST"])
def display():
return jsonify(request.form)
|
[
"flask.jsonify",
"flask.Flask",
"flask.render_template"
] |
[((1214, 1229), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1219, 1229), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1272, 1301), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1287, 1301), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1364, 1385), 'flask.jsonify', 'jsonify', (['request.form'], {}), '(request.form)\n', (1371, 1385), False, 'from flask import Flask, render_template, request, jsonify\n')]
|
import inspect
# Indent level for writer
_INDENT_LEVEL = 2
_INDENT = ' ' * _INDENT_LEVEL
class _Writer(object):
'''Writer used to create source files with consistent formatting'''
def __init__(self, path):
'''
Args:
            path (handle): Open file handle to write to
'''
self._path = path
self._indent_level = 0
self._start_of_line = True
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
'''
Args:
exception_type: Type of exception that triggered the exit
exception_value: Value of exception that triggered the exit
traceback: Traceback when exit was triggered
'''
        # Clear the path if an uncaught exception occurred while writing:
if exception_type:
self._path.truncate(0)
def indent(self):
'''Indent the writer by one level
        To be used in a similar fashion to the write() function in this class.
See documentation on the write() function for further explanation.
'''
self._indent_level += 1
return self
def dedent(self):
'''Dedent the writer by one level
        To be used in a similar fashion to the write() function in this class.
See documentation on the write() function for further explanation.
'''
if self._indent_level > 0:
self._indent_level -= 1
return self
def write(self, content='', end_in_newline=True):
'''
Write content to the file
open(path, 'w') needs to be called prior to calling this function,
typically by
````with open(file, 'w') as f:
self.write_fn(f)````
where `self` is a higher level object and `write_fn(self, file)`
would look something like
````def _write_html(self, file):
with _Writer(file) as w:
w.write('string to write')
w.write(self.string_to_write)````
Args:
content (str): Content to write, as a string
Content is cleaned using Python's `inspect.cleandoc()`
end_in_newline (bool): Whether or not to write a newline at the end
Default is True.
'''
lines = inspect.cleandoc(content).splitlines()
for index, line in enumerate(lines):
# Indent if the start of a line
if self._start_of_line:
self._path.write(_INDENT * self._indent_level)
# Write the line
self._path.write(line)
# Write a new line if there's still more content
if index < len(lines) - 1:
self._path.write('\n')
self._start_of_line = True
# If the content should end in a newline, write it
if end_in_newline:
self._path.write('\n')
self._start_of_line = True
else:
self._start_of_line = False
return self
|
[
"inspect.cleandoc"
] |
[((2383, 2408), 'inspect.cleandoc', 'inspect.cleandoc', (['content'], {}), '(content)\n', (2399, 2408), False, 'import inspect\n')]
|
import torch
from torch import nn
from gpytorch.kernels import LinearKernel,MaternKernel,RBFKernel,Kernel
from torch.nn.modules.loss import _Loss
class Log1PlusExp(torch.autograd.Function):
"""Implementation of x ↦ log(1 + exp(x))."""
@staticmethod
def forward(ctx, x):
exp = x.exp()
ctx.save_for_backward(x)
y = exp.log1p()
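        # For large x, exp(x) overflows to inf while log(1 + exp(x)) ~ x,
        # so fall back to x itself at those positions.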
return x.where(torch.isinf(exp),y.half() if x.type()=='torch.cuda.HalfTensor' else y )
@staticmethod
def backward(ctx, grad_output):
x, = ctx.saved_tensors
y = (-x).exp().half() if x.type()=='torch.cuda.HalfTensor' else (-x).exp()
return grad_output / (1 + y)
class stableBCEwithlogits(_Loss):
def __init__(self, reduction='mean'):
super(stableBCEwithlogits, self).__init__(reduction=reduction)
self.f = Log1PlusExp.apply
def forward(self, x, y):
return torch.mean(self.f(x)-x*y)
class linear_benchmark(nn.Module):
def __init__(self,d):
super(linear_benchmark, self).__init__()
self.register_buffer('w',torch.ones(d))
self.objective = stableBCEwithlogits()
def forward(self,data,c,debug_xi = None):
X = data[~c, :]
Y = data[c, :]
target = torch.cat([torch.zeros(X.shape[0]),torch.ones(Y.shape[0])]).to(X.device)
data = torch.cat([X,Y])
        pred = (data @ self.w).squeeze()
return -self.objective(pred,target)
class MEstat(nn.Module):
def __init__(self,J,ls=10,test_nx=1,test_ny=1,asymp_n=-1,kernel_type = 'rbf',linear_var=1e-3):
super(MEstat, self).__init__()
print(ls)
self.ratio = J
self.hotelling = False
self.kernel_type = kernel_type
if kernel_type=='hotelling': #Regularization fixes it...
self.hotelling = True
self.coeff = min(min(test_nx, test_ny) ** asymp_n, 1e-2)
else:
if kernel_type=='rbf':
self.kernel_X = RBFKernel()
self.kernel_X.raw_lengthscale = nn.Parameter(torch.tensor([ls]).float(), requires_grad=False)
elif kernel_type=='linear':
self.kernel_X = LinearKernel()
self.kernel_X._set_variance(linear_var)
elif kernel_type=='matern':
self.kernel_X = MaternKernel(nu=2.5)
self.kernel_X.raw_lengthscale = nn.Parameter(torch.tensor([ls]).float(), requires_grad=False)
self.coeff = min(min(test_nx, test_ny) ** asymp_n, 1e-5)
self.kernel_base = Kernel()
def get_median_ls(self,X):
with torch.no_grad():
d = self.kernel_base.covar_dist(X,X)
return torch.sqrt(torch.median(d[d > 0]))
@staticmethod
def cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
m_mean = torch.mean(m, dim=1, keepdim=True)
m = m - m_mean
return m.matmul(m.t()).squeeze(),m_mean.squeeze()
def calculate_hotelling(self, X):
cov_X,x_bar = self.cov(X)
return cov_X,x_bar,0,0
def get_sample_witness(self,X,Y):
n_x = X.shape[0]
n_y = Y.shape[0]
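        # Split off a `ratio` fraction of each sample as detached witness
        # points (T_x, T_y); the remainder is kept as the test sample.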
idx = torch.randperm(n_x)
idy = torch.randperm(n_y)
J_x = round(n_x*self.ratio)
J_y = round(n_y*self.ratio)
T_x, T_y = X[idx[:J_x], :].detach(), Y[idy[:J_y], :].detach()
X,Y = X[idx[J_x:], :], Y[idy[J_y:], :]
return T_x,T_y,X,Y
def get_umap_stuff(self,X,Y,T):
kX = self.kernel_X(X, T).evaluate()
kY = self.kernel_X(Y,T).evaluate()
return kX,kY,torch.cat([kX,kY],dim=0)
def forward_plain(self,X,Y,T,n_x,n_y):
if not self.hotelling:
cov_X,x_bar,k_X,kX = self.calculate_ME_hotelling(X, T)
cov_Y,y_bar,k_Y,kY = self.calculate_ME_hotelling(Y, T)
else:
cov_X, x_bar, k_X, kX = self.calculate_hotelling(X)
cov_Y, y_bar, k_Y, kY = self.calculate_hotelling(Y)
pooled = 1. / (n_x + n_y - 2.) * (cov_X + cov_Y)
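        # Hotelling-style two-sample statistic: difference of means weighted
        # by the inverse of the ridge-regularised pooled covariance.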
z = torch.unsqueeze(x_bar - y_bar, 1)
inv_z,_ = torch.solve(z,pooled.float() + self.coeff*torch.eye(pooled.shape[0]).float().to(pooled.device))
test_statistic = n_x * n_y / (n_x + n_y) * torch.sum(z * inv_z)
return test_statistic
def forward(self,data,c,debug_xi_hat=None):
X = data[~c,:]
Y = data[c,:]
tmp_dev = X.device
if not self.hotelling:
T_x,T_y,X,Y = self.get_sample_witness(X,Y)
n_x = X.shape[0]
n_y = Y.shape[0]
T = torch.cat([T_x, T_y],dim=0)
if not self.kernel_type=='linear':
_tmp = torch.cat([X, Y], dim=0).detach()
with torch.no_grad():
sig = self.get_median_ls(_tmp)
self.kernel_X.raw_lengthscale = nn.Parameter(sig.unsqueeze(-1).to(tmp_dev),requires_grad=False) # Use old setup?!??!?!?!
else:
_tmp = torch.tensor(0)
sig=0
cov_X,x_bar,k_X,kX = self.calculate_ME_hotelling(X, T)
cov_Y,y_bar,k_Y,kY = self.calculate_ME_hotelling(Y, T)
else:
_tmp = 0
n_x = X.shape[0]
n_y = Y.shape[0]
cov_X,x_bar,k_X,kX = self.calculate_hotelling(X)
cov_Y,y_bar,k_Y,kY = self.calculate_hotelling(Y)
pooled = 1./(n_x+n_y-2.) * cov_X + cov_Y*1./(n_x+n_y-2.)
z = torch.unsqueeze(x_bar-y_bar,1)
inv_z,_ = torch.solve(z.float(),pooled.float() + self.coeff*torch.eye(pooled.shape[0]).float().to(tmp_dev))
test_statistic = n_x*n_y/(n_x + n_y) * torch.sum(z*inv_z)
if test_statistic.data ==0 or test_statistic==float('inf') or test_statistic!=test_statistic: #The lengthscale be fucking me...
print(test_statistic)
print(x_bar)
print(y_bar)
print(inv_z)
print(cov_X)
print(cov_Y)
print(k_X)
print(k_Y)
print(kX)
print(kY)
print(_tmp.min(),_tmp.max())
print(sig)
print(n_x*n_y/(n_x + n_y))
print(pooled)
return test_statistic
def calculate_ME_hotelling(self, X, T):
kX = self.kernel_X(X, T).evaluate()
x_bar = torch.mean(kX, dim=0)
k_X = kX - x_bar
cov_X = k_X.t() @ k_X
return cov_X, x_bar, k_X, kX
|
[
"torch.mean",
"torch.ones",
"torch.median",
"torch.eye",
"torch.isinf",
"gpytorch.kernels.RBFKernel",
"torch.unsqueeze",
"torch.cat",
"torch.zeros",
"gpytorch.kernels.MaternKernel",
"torch.randperm",
"gpytorch.kernels.LinearKernel",
"gpytorch.kernels.Kernel",
"torch.no_grad",
"torch.sum",
"torch.tensor"
] |
[((1322, 1339), 'torch.cat', 'torch.cat', (['[X, Y]'], {}), '([X, Y])\n', (1331, 1339), False, 'import torch\n'), ((2513, 2521), 'gpytorch.kernels.Kernel', 'Kernel', ([], {}), '()\n', (2519, 2521), False, 'from gpytorch.kernels import LinearKernel, MaternKernel, RBFKernel, Kernel\n'), ((3959, 3993), 'torch.mean', 'torch.mean', (['m'], {'dim': '(1)', 'keepdim': '(True)'}), '(m, dim=1, keepdim=True)\n', (3969, 3993), False, 'import torch\n'), ((4283, 4302), 'torch.randperm', 'torch.randperm', (['n_x'], {}), '(n_x)\n', (4297, 4302), False, 'import torch\n'), ((4317, 4336), 'torch.randperm', 'torch.randperm', (['n_y'], {}), '(n_y)\n', (4331, 4336), False, 'import torch\n'), ((5143, 5176), 'torch.unsqueeze', 'torch.unsqueeze', (['(x_bar - y_bar)', '(1)'], {}), '(x_bar - y_bar, 1)\n', (5158, 5176), False, 'import torch\n'), ((6548, 6581), 'torch.unsqueeze', 'torch.unsqueeze', (['(x_bar - y_bar)', '(1)'], {}), '(x_bar - y_bar, 1)\n', (6563, 6581), False, 'import torch\n'), ((7411, 7432), 'torch.mean', 'torch.mean', (['kX'], {'dim': '(0)'}), '(kX, dim=0)\n', (7421, 7432), False, 'import torch\n'), ((385, 401), 'torch.isinf', 'torch.isinf', (['exp'], {}), '(exp)\n', (396, 401), False, 'import torch\n'), ((1061, 1074), 'torch.ones', 'torch.ones', (['d'], {}), '(d)\n', (1071, 1074), False, 'import torch\n'), ((2567, 2582), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2580, 2582), False, 'import torch\n'), ((4698, 4724), 'torch.cat', 'torch.cat', (['[kX, kY]'], {'dim': '(0)'}), '([kX, kY], dim=0)\n', (4707, 4724), False, 'import torch\n'), ((5342, 5362), 'torch.sum', 'torch.sum', (['(z * inv_z)'], {}), '(z * inv_z)\n', (5351, 5362), False, 'import torch\n'), ((5676, 5704), 'torch.cat', 'torch.cat', (['[T_x, T_y]'], {'dim': '(0)'}), '([T_x, T_y], dim=0)\n', (5685, 5704), False, 'import torch\n'), ((6742, 6762), 'torch.sum', 'torch.sum', (['(z * inv_z)'], {}), '(z * inv_z)\n', (6751, 6762), False, 'import torch\n'), ((1948, 1959), 'gpytorch.kernels.RBFKernel', 'RBFKernel', ([], {}), '()\n', (1957, 1959), False, 'from gpytorch.kernels import LinearKernel, MaternKernel, RBFKernel, Kernel\n'), ((2663, 2685), 'torch.median', 'torch.median', (['d[d > 0]'], {}), '(d[d > 0])\n', (2675, 2685), False, 'import torch\n'), ((6081, 6096), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (6093, 6096), False, 'import torch\n'), ((2142, 2156), 'gpytorch.kernels.LinearKernel', 'LinearKernel', ([], {}), '()\n', (2154, 2156), False, 'from gpytorch.kernels import LinearKernel, MaternKernel, RBFKernel, Kernel\n'), ((5829, 5844), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5842, 5844), False, 'import torch\n'), ((1245, 1268), 'torch.zeros', 'torch.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (1256, 1268), False, 'import torch\n'), ((1269, 1291), 'torch.ones', 'torch.ones', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (1279, 1291), False, 'import torch\n'), ((2285, 2305), 'gpytorch.kernels.MaternKernel', 'MaternKernel', ([], {'nu': '(2.5)'}), '(nu=2.5)\n', (2297, 2305), False, 'from gpytorch.kernels import LinearKernel, MaternKernel, RBFKernel, Kernel\n'), ((5774, 5798), 'torch.cat', 'torch.cat', (['[X, Y]'], {'dim': '(0)'}), '([X, Y], dim=0)\n', (5783, 5798), False, 'import torch\n'), ((2021, 2039), 'torch.tensor', 'torch.tensor', (['[ls]'], {}), '([ls])\n', (2033, 2039), False, 'import torch\n'), ((2367, 2385), 'torch.tensor', 'torch.tensor', (['[ls]'], {}), '([ls])\n', (2379, 2385), False, 'import torch\n'), ((5237, 5263), 'torch.eye', 'torch.eye', (['pooled.shape[0]'], {}), '(pooled.shape[0])\n', 
(5246, 5263), False, 'import torch\n'), ((6647, 6673), 'torch.eye', 'torch.eye', (['pooled.shape[0]'], {}), '(pooled.shape[0])\n', (6656, 6673), False, 'import torch\n')]
|
from rembg.multiprocessing import parallel_greenscreen
if __name__ == "__main__":
parallel_greenscreen("/Users/zihao/Desktop/zero/video/group15B_Short.avi",
3,
1,
"u2net_human_seg",
frame_limit=300)
|
[
"rembg.multiprocessing.parallel_greenscreen"
] |
[((92, 213), 'rembg.multiprocessing.parallel_greenscreen', 'parallel_greenscreen', (['"""/Users/zihao/Desktop/zero/video/group15B_Short.avi"""', '(3)', '(1)', '"""u2net_human_seg"""'], {'frame_limit': '(300)'}), "('/Users/zihao/Desktop/zero/video/group15B_Short.avi', \n 3, 1, 'u2net_human_seg', frame_limit=300)\n", (112, 213), False, 'from rembg.multiprocessing import parallel_greenscreen\n')]
|
from datetime import datetime, timedelta
import numpy as np
input_data={'incon_state':'current',
'EOS':1,
'source_txt':'../input/',
'ref_date':datetime(1975,1,1,0,0,0),
'z_ref':600,
'db_path':'../input/model_month.db',
'LAYERS':{1:['A',100],
2:['B', 100],
3:['C', 125],
4:['D', 60],
5:['E',30],
6:['F',65],
7:['G',40],
8:['H',65],
9:['I',30],
10:['J',100],
11:['K',50],
12:['L',250],
13:['M',200],
14:['N',400],
15:['O',400],
16:['P',200],
17:['Q',200],
18:['R', 100]},
'TITLE':'Test output TOUGH2',
'TYPE_RUN':'production',
'PARAMETERS':
{'NOITE':1,
'KDATA':2,
'MCYC':100,
'MCYPR':30,
'P':100,
'T':350,
'X':0.1,
'DELTEN':-1,
'DELTEN_LIST':[10,30,50,1000,10000,10000]
},
'TIMES':{'TIMES_N':np.arange(datetime(1985,7,1), datetime(2015,7,1), timedelta(days=120)).astype(datetime)},
'SOLVR':{
'MATSLV':5,
'ZPROCS':'Z4',
'OPROCS':'O4',
'RITMAX':0.04,
'CLOSUR':1E-6,
},
'INCONS_PARAM':{
'To':30,
'GRADTZ':0.08,
'DEPTH_TO_SURF':100,
'DELTAZ':20
},
'RPCAP':{
'IRP':3,
'RP1':0.4,
'RP2':0.03,
'ICP':1,
'ICP1':1.0E6,
'ICP2':0.0,
'ICP3':1.0,
},
'MULTI':{
'NK':1,
'NEQ':2,
'NPH':2,
'NB':6
},
'IT2':{
'T_DEV':5,
'P_DEV':10,
'h_DEV':200,
},
'WELLS':['AH-1',
'AH-2',
'AH-3',
'AH-4',
'AH-4BIS',
'AH-5',
'AH-6',
'AH-7',
'AH-8',
'AH-9',
'AH-11',
'AH-12',
'AH-13',
'AH-14',
'AH-15',
'AH-16',
'AH-16A',
'AH-17',
'AH-18',
'AH-19',
'AH-20',
'AH-21',
'AH-22',
'AH-23',
'AH-24',
'AH-25',
'AH-26',
'AH-27',
'AH-28',
'AH-29',
'AH-30',
'AH-31',
'AH-32',
'AH-33A',
'AH-33B',
'AH-33C',
'AH-34',
'AH-34A',
'AH-34B',
'AH-35A',
'AH-35B',
'AH-35C',
'AH-35D',
'AH-36',
'CH-1',
'CH-10',
'CH-7',
'CH-7BIS',
'CH-8',
'CH-9',
'CH-9A',
'CH-9B',
'CH-A'],
'MAKE_UP_WELLS':[
'ZAH-37A',
'ZAH-37B',
'ZAH-38A',
'ZAH-38B',
'ZAH-38C',
'ZAH-39A',
'ZAH-39B',
'ZAH-39C',
'XCH-9C',
'XCH-D1',
'XCH-D2',
'XCH-12A',
'XCH-12B',
'XCH-8A',
'XCH-8B',
],
'NOT_PRODUCING_WELL':['CH-D'],
}
#'XAH-2R'
mesh_setup={'mesh_creation':True ,
'Xmin':404000,
'Xmax':424000,
'Ymin':302000,
'Ymax':322000,
'x_from_boarder':1000,
'y_from_boarder':1000,
'x_space':2000,
'y_space':2000,
'x_gap_min':411300,
'x_gap_max':418500,
'y_gap_min':304500,
'y_gap_max':311250,
'x_gap_space':250,
'y_gap_space':250,
'radius_criteria':150,
'filename':'../input/well_feedzone_xyz.csv',
'filepath':'',
'toler':0.1,
'layer_to_plot':1,
'plot_names':False,
'plot_centers':False,
'plot_layer':False,
'to_steinar':True,
'to_GIS':False,
'plot_all_GIS':False,
'from_leapfrog':False,
'line_file':'',
'fault_distance':50,
'with_polygon':True,
'polygon_shape':"../input/area/polygon.shp",
"set_inac_from_poly":False,
'set_inac_from_inner':True,
'angle':10,
'rotate':True,
'colors':{1:'red',\
2:'white',\
3:'yellow',\
4:'blue',\
5:'green',\
6:'purple',\
7:'#ff69b4',\
8:'darkorange',\
9:'cyan',\
10:'magenta',\
11:'#faebd7',\
12:'#2e8b57',\
13:'#eeefff',\
14:'#da70d6',\
15:'#ff7f50',\
16:'#cd853f',\
17:'#bc8f8f',\
18:'#5f9ea0',\
19:'#daa520'}}
geners={'QA797':{'SL':'GEN',
'NS':10,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'QA763':{'SL':'GEN',
'NS':11,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'QA839':{'SL':'GEN',
'NS':12,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'QA762':{'SL':'GEN',
'NS':13,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'QA796':{'SL':'GEN',
'NS':14,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'QA795':{'SL':'GEN',
'NS':15,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'QA761':{'SL':'GEN',
'NS':16,
'TYPE':'MASS',
'GX':37,
'EX':1.1E6},
'EA833':{'SL':'SRC',
'NS':81,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
'EA866':{'SL':'SRC',
'NS':82,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
'EA897':{'SL':'SRC',
'NS':83,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
'EA865':{'SL':'SRC',
'NS':84,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
'EA896':{'SL':'SRC',
'NS':85,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
'EA831':{'SL':'SRC',
'NS':86,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
'EA864':{'SL':'SRC',
'NS':87,
'TYPE':'DELV',
'GX':5.000E-11,
'EX':1.500E+06 ,
'HG':1.000E+02},
}
#to_GIS alone does just one plot
#to_GIS together with plot_all_GIS plots everything
#try polygon true
#'line_file':'../input/lines.csv',
#maybe it is better to take the to_GIS function out of pyamesh and run it alone
#For amesh https://askubuntu.com/questions/454253/how-to-run-32-bit-app-in-ubuntu-64-bit
#the Ahuachapan model has another mesh setup
|
[
"datetime.timedelta",
"datetime.datetime"
] |
[((153, 182), 'datetime.datetime', 'datetime', (['(1975)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1975, 1, 1, 0, 0, 0)\n', (161, 182), False, 'from datetime import datetime, timedelta\n'), ((884, 904), 'datetime.datetime', 'datetime', (['(1985)', '(7)', '(1)'], {}), '(1985, 7, 1)\n', (892, 904), False, 'from datetime import datetime, timedelta\n'), ((904, 924), 'datetime.datetime', 'datetime', (['(2015)', '(7)', '(1)'], {}), '(2015, 7, 1)\n', (912, 924), False, 'from datetime import datetime, timedelta\n'), ((924, 943), 'datetime.timedelta', 'timedelta', ([], {'days': '(120)'}), '(days=120)\n', (933, 943), False, 'from datetime import datetime, timedelta\n')]
|
import os
from flask import Flask, app, flash, session
from flask_pymongo import PyMongo
from datetime import date, datetime
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.getenv('MONGO_DBNAME')
app.config["MONGO_URI"] = os.getenv('MONGO_URI')
app.config["SECRET_KEY"] = os.getenv('SECRET_KEY')
mongo = PyMongo(app)
"""Collections"""
stories_collection = mongo.db.stories
users_collection = mongo.db.users
fake_collection = None
"""Helper functions"""
def list_by_type():
list_by_type = {}
ratings = []
genres = []
fandoms = []
authors = []
if session.get('is_adult') == True:
selection = stories_collection.find()
else:
selection = stories_collection.find( {"rating": {"$nin": ["R/Adult/NSFW", "Adult/NSFW"]}})
    for story in selection:
        rating = story['rating']
        author = story['author']
        if rating not in ratings:
            ratings.append(rating)
        if author not in authors:
            authors.append(author)
        # Collect every genre listed on the story, not just the last one
        for genre in story.get('genres') or []:
            if genre not in genres:
                genres.append(genre)
        # Collect every fandom; fall back to a placeholder when none were added
        for fandom in story.get('fandoms') or ["Fandom not added"]:
            if fandom not in fandoms:
                fandoms.append(fandom)
list_by_type.update({"ratings": ratings, "genres": genres,
"fandoms": fandoms, "authors": authors})
return list_by_type
def story_count():
story_count = []
ratings_list = list_by_type()["ratings"]
genres_list = list_by_type()["genres"]
fandoms_list = list_by_type()["fandoms"]
authors_list = list_by_type()["authors"]
for rating in ratings_list:
count = stories_collection.count_documents({"rating": rating})
count_rating = {"rating": rating, "total": count}
story_count.append(count_rating)
for genre in genres_list:
count = stories_collection.count_documents({"genres": genre})
count_genre = {"genre": genre, "total": count}
story_count.append(count_genre)
for fandom in fandoms_list:
count = stories_collection.count_documents({"fandoms": fandom})
count_fandom = {"fandom": fandom, "total": count}
story_count.append(count_fandom)
for author in authors_list:
count = stories_collection.count_documents({"author": author})
count_author = {"author": author, "total": count}
story_count.append(count_author)
return story_count
def report(item, reason_given, this_story, reported_by):
stories_collection.find_one_and_update({"url": this_story}, {'$push': {"reports": {"item_reported": item, "reported_by": reported_by, "reason_given": reason_given}}}, upsert=True)
return flash("Report sent to admins.")
def calculate_age(born):
today = date.today()
bday = datetime.strptime(born, '%Y-%m-%d')
age = today.year - bday.year - ((today.month, today.day) < (bday.month, bday.day))
return age
|
[
"flask.flash",
"flask.Flask",
"flask.session.get",
"datetime.date.today",
"datetime.datetime.strptime",
"flask_pymongo.PyMongo",
"os.getenv"
] |
[((132, 147), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (137, 147), False, 'from flask import Flask, app, flash, session\n'), ((177, 202), 'os.getenv', 'os.getenv', (['"""MONGO_DBNAME"""'], {}), "('MONGO_DBNAME')\n", (186, 202), False, 'import os\n'), ((229, 251), 'os.getenv', 'os.getenv', (['"""MONGO_URI"""'], {}), "('MONGO_URI')\n", (238, 251), False, 'import os\n'), ((279, 302), 'os.getenv', 'os.getenv', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (288, 302), False, 'import os\n'), ((312, 324), 'flask_pymongo.PyMongo', 'PyMongo', (['app'], {}), '(app)\n', (319, 324), False, 'from flask_pymongo import PyMongo\n'), ((2937, 2968), 'flask.flash', 'flash', (['"""Report sent to admins."""'], {}), "('Report sent to admins.')\n", (2942, 2968), False, 'from flask import Flask, app, flash, session\n'), ((3008, 3020), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3018, 3020), False, 'from datetime import date, datetime\n'), ((3032, 3067), 'datetime.datetime.strptime', 'datetime.strptime', (['born', '"""%Y-%m-%d"""'], {}), "(born, '%Y-%m-%d')\n", (3049, 3067), False, 'from datetime import date, datetime\n'), ((583, 606), 'flask.session.get', 'session.get', (['"""is_adult"""'], {}), "('is_adult')\n", (594, 606), False, 'from flask import Flask, app, flash, session\n')]
|
import unittest
from unittest.mock import (
AsyncMock,
call,
)
from uuid import (
uuid4,
)
from minos.saga import (
ConditionalSagaStepExecution,
LocalSagaStep,
LocalSagaStepExecution,
RemoteSagaStepExecution,
Saga,
SagaContext,
SagaExecution,
TransactionCommitter,
)
from tests.utils import (
MinosTestCase,
)
class TestTransactionCommitter(MinosTestCase):
def setUp(self) -> None:
super().setUp()
self.execution_uuid = uuid4()
# noinspection PyTypeChecker
definition = LocalSagaStep(on_execute=LocalSagaStep)
self.executed_steps = [
RemoteSagaStepExecution(definition, {"foo"}),
LocalSagaStepExecution(definition, {"bar"}),
ConditionalSagaStepExecution(
definition,
{"bar"},
inner=SagaExecution(
Saga(steps=[definition], committed=True),
self.execution_uuid,
SagaContext(),
steps=[
RemoteSagaStepExecution(definition, {"foo"}),
RemoteSagaStepExecution(definition, {"foobar"}),
],
),
),
ConditionalSagaStepExecution(definition),
]
self.committer = TransactionCommitter(self.execution_uuid, self.executed_steps)
def test_transactions(self):
expected = [
(self.execution_uuid, "bar"),
(self.execution_uuid, "foo"),
(self.execution_uuid, "foobar"),
]
self.assertEqual(expected, self.committer.transactions)
async def test_commit_true(self):
get_mock = AsyncMock()
get_mock.return_value.data.ok = True
self.broker.get_one = get_mock
send_mock = AsyncMock()
self.broker_publisher.send = send_mock
await self.committer.commit()
self.assertEqual(
[
call(data=self.execution_uuid, topic="ReserveBarTransaction", reply_topic="TheReplyTopic"),
call(data=self.execution_uuid, topic="ReserveFooTransaction", reply_topic="TheReplyTopic"),
call(data=self.execution_uuid, topic="ReserveFoobarTransaction", reply_topic="TheReplyTopic"),
call(data=self.execution_uuid, topic="CommitBarTransaction"),
call(data=self.execution_uuid, topic="CommitFooTransaction"),
call(data=self.execution_uuid, topic="CommitFoobarTransaction"),
],
send_mock.call_args_list,
)
async def test_commit_false(self):
get_mock = AsyncMock()
get_mock.return_value.data.ok = False
self.broker.get_one = get_mock
send_mock = AsyncMock()
self.broker_publisher.send = send_mock
with self.assertRaises(ValueError):
await self.committer.commit()
self.assertEqual(
[
call(data=self.execution_uuid, topic="ReserveBarTransaction", reply_topic="TheReplyTopic"),
call(data=self.execution_uuid, topic="ReserveFooTransaction", reply_topic="TheReplyTopic"),
call(data=self.execution_uuid, topic="ReserveFoobarTransaction", reply_topic="TheReplyTopic"),
call(data=self.execution_uuid, topic="RejectBarTransaction"),
call(data=self.execution_uuid, topic="RejectFooTransaction"),
call(data=self.execution_uuid, topic="RejectFoobarTransaction"),
],
send_mock.call_args_list,
)
async def test_reject(self):
get_mock = AsyncMock()
self.broker.get_one = get_mock
send_mock = AsyncMock()
self.broker_publisher.send = send_mock
await self.committer.reject()
self.assertEqual(
[
call(data=self.execution_uuid, topic="RejectBarTransaction"),
call(data=self.execution_uuid, topic="RejectFooTransaction"),
call(data=self.execution_uuid, topic="RejectFoobarTransaction"),
],
send_mock.call_args_list,
)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"minos.saga.LocalSagaStep",
"minos.saga.TransactionCommitter",
"uuid.uuid4",
"minos.saga.RemoteSagaStepExecution",
"minos.saga.Saga",
"unittest.mock.AsyncMock",
"minos.saga.ConditionalSagaStepExecution",
"minos.saga.SagaContext",
"minos.saga.LocalSagaStepExecution",
"unittest.mock.call"
] |
[((4185, 4200), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4198, 4200), False, 'import unittest\n'), ((493, 500), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (498, 500), False, 'from uuid import uuid4\n'), ((560, 599), 'minos.saga.LocalSagaStep', 'LocalSagaStep', ([], {'on_execute': 'LocalSagaStep'}), '(on_execute=LocalSagaStep)\n', (573, 599), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1335, 1397), 'minos.saga.TransactionCommitter', 'TransactionCommitter', (['self.execution_uuid', 'self.executed_steps'], {}), '(self.execution_uuid, self.executed_steps)\n', (1355, 1397), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1714, 1725), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (1723, 1725), False, 'from unittest.mock import AsyncMock, call\n'), ((1831, 1842), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (1840, 1842), False, 'from unittest.mock import AsyncMock, call\n'), ((2656, 2667), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (2665, 2667), False, 'from unittest.mock import AsyncMock, call\n'), ((2774, 2785), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (2783, 2785), False, 'from unittest.mock import AsyncMock, call\n'), ((3641, 3652), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (3650, 3652), False, 'from unittest.mock import AsyncMock, call\n'), ((3713, 3724), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (3722, 3724), False, 'from unittest.mock import AsyncMock, call\n'), ((644, 688), 'minos.saga.RemoteSagaStepExecution', 'RemoteSagaStepExecution', (['definition', "{'foo'}"], {}), "(definition, {'foo'})\n", (667, 688), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((702, 745), 'minos.saga.LocalSagaStepExecution', 'LocalSagaStepExecution', (['definition', "{'bar'}"], {}), "(definition, {'bar'})\n", (724, 745), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1257, 1297), 'minos.saga.ConditionalSagaStepExecution', 'ConditionalSagaStepExecution', (['definition'], {}), '(definition)\n', (1285, 1297), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1986, 2081), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""ReserveBarTransaction"""', 'reply_topic': '"""TheReplyTopic"""'}), "(data=self.execution_uuid, topic='ReserveBarTransaction', reply_topic=\n 'TheReplyTopic')\n", (1990, 2081), False, 'from unittest.mock import AsyncMock, call\n'), ((2094, 2189), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""ReserveFooTransaction"""', 'reply_topic': '"""TheReplyTopic"""'}), "(data=self.execution_uuid, topic='ReserveFooTransaction', reply_topic=\n 'TheReplyTopic')\n", (2098, 2189), False, 'from unittest.mock import AsyncMock, call\n'), ((2202, 2299), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""ReserveFoobarTransaction"""', 'reply_topic': 
'"""TheReplyTopic"""'}), "(data=self.execution_uuid, topic='ReserveFoobarTransaction',\n reply_topic='TheReplyTopic')\n", (2206, 2299), False, 'from unittest.mock import AsyncMock, call\n'), ((2313, 2373), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""CommitBarTransaction"""'}), "(data=self.execution_uuid, topic='CommitBarTransaction')\n", (2317, 2373), False, 'from unittest.mock import AsyncMock, call\n'), ((2391, 2451), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""CommitFooTransaction"""'}), "(data=self.execution_uuid, topic='CommitFooTransaction')\n", (2395, 2451), False, 'from unittest.mock import AsyncMock, call\n'), ((2469, 2532), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""CommitFoobarTransaction"""'}), "(data=self.execution_uuid, topic='CommitFoobarTransaction')\n", (2473, 2532), False, 'from unittest.mock import AsyncMock, call\n'), ((2977, 3072), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""ReserveBarTransaction"""', 'reply_topic': '"""TheReplyTopic"""'}), "(data=self.execution_uuid, topic='ReserveBarTransaction', reply_topic=\n 'TheReplyTopic')\n", (2981, 3072), False, 'from unittest.mock import AsyncMock, call\n'), ((3085, 3180), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""ReserveFooTransaction"""', 'reply_topic': '"""TheReplyTopic"""'}), "(data=self.execution_uuid, topic='ReserveFooTransaction', reply_topic=\n 'TheReplyTopic')\n", (3089, 3180), False, 'from unittest.mock import AsyncMock, call\n'), ((3193, 3290), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""ReserveFoobarTransaction"""', 'reply_topic': '"""TheReplyTopic"""'}), "(data=self.execution_uuid, topic='ReserveFoobarTransaction',\n reply_topic='TheReplyTopic')\n", (3197, 3290), False, 'from unittest.mock import AsyncMock, call\n'), ((3304, 3364), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""RejectBarTransaction"""'}), "(data=self.execution_uuid, topic='RejectBarTransaction')\n", (3308, 3364), False, 'from unittest.mock import AsyncMock, call\n'), ((3382, 3442), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""RejectFooTransaction"""'}), "(data=self.execution_uuid, topic='RejectFooTransaction')\n", (3386, 3442), False, 'from unittest.mock import AsyncMock, call\n'), ((3460, 3523), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""RejectFoobarTransaction"""'}), "(data=self.execution_uuid, topic='RejectFoobarTransaction')\n", (3464, 3523), False, 'from unittest.mock import AsyncMock, call\n'), ((3868, 3928), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""RejectBarTransaction"""'}), "(data=self.execution_uuid, topic='RejectBarTransaction')\n", (3872, 3928), False, 'from unittest.mock import AsyncMock, call\n'), ((3946, 4006), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""RejectFooTransaction"""'}), "(data=self.execution_uuid, topic='RejectFooTransaction')\n", (3950, 4006), False, 'from unittest.mock import AsyncMock, call\n'), ((4024, 4087), 'unittest.mock.call', 'call', ([], {'data': 'self.execution_uuid', 'topic': '"""RejectFoobarTransaction"""'}), "(data=self.execution_uuid, topic='RejectFoobarTransaction')\n", (4028, 4087), False, 'from unittest.mock import AsyncMock, call\n'), ((899, 939), 'minos.saga.Saga', 'Saga', ([], {'steps': '[definition]', 
'committed': '(True)'}), '(steps=[definition], committed=True)\n', (903, 939), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1002, 1015), 'minos.saga.SagaContext', 'SagaContext', ([], {}), '()\n', (1013, 1015), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1069, 1113), 'minos.saga.RemoteSagaStepExecution', 'RemoteSagaStepExecution', (['definition', "{'foo'}"], {}), "(definition, {'foo'})\n", (1092, 1113), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n'), ((1139, 1186), 'minos.saga.RemoteSagaStepExecution', 'RemoteSagaStepExecution', (['definition', "{'foobar'}"], {}), "(definition, {'foobar'})\n", (1162, 1186), False, 'from minos.saga import ConditionalSagaStepExecution, LocalSagaStep, LocalSagaStepExecution, RemoteSagaStepExecution, Saga, SagaContext, SagaExecution, TransactionCommitter\n')]
|
"""
:date_created: 2020-06-28
"""
from do_py.common import R
from do_py.data_object.restriction import ManagedRestrictions
from do_py.exceptions import RestrictionError
class ManagedList(ManagedRestrictions):
"""
    Use this when you need a restriction for a list of DataObjects.
"""
_restriction = R(list, type(None))
@property
def schema_value(self):
"""
:rtype: list[dict]
"""
return [self.obj_cls.schema]
def __init__(self, obj_cls, nullable=False):
"""
:param obj_cls: The DO to check each value in the list against.
:type obj_cls: DataObject
        :param nullable: Whether a NoneType value is allowed in place of a list of DOs.
:type nullable: bool
"""
super(ManagedList, self).__init__()
self.obj_cls = obj_cls
self.nullable = nullable
def manage(self):
if self.data is not None:
items = []
for item in self.data:
items.append(item if type(item) == self.obj_cls else self.obj_cls(item))
self.data = items
else:
if not self.nullable:
raise RestrictionError.bad_data(self.data, self._restriction.allowed)
# TODO: Unit tests
class OrderedManagedList(ManagedList):
def __init__(self, obj_cls, nullable=False, key=None, reverse=False):
"""
:param obj_cls: DataObject class reference to wrap each object in list.
:type nullable: bool
:type key: function
:type reverse: bool
"""
self.key = key
self.reverse = reverse
super(OrderedManagedList, self).__init__(obj_cls, nullable=nullable)
def manage(self):
"""
Sort the data list after ManagedList does its work.
"""
super(OrderedManagedList, self).manage()
self.data = sorted(self.data, key=self.key, reverse=self.reverse)
|
[
"do_py.exceptions.RestrictionError.bad_data"
] |
[((1159, 1222), 'do_py.exceptions.RestrictionError.bad_data', 'RestrictionError.bad_data', (['self.data', 'self._restriction.allowed'], {}), '(self.data, self._restriction.allowed)\n', (1184, 1222), False, 'from do_py.exceptions import RestrictionError\n')]
|
#!/usr/bin/env python3
import unittest
import networkx as nx
from Medusa.graphs import bfs
class TestBFS(unittest.TestCase):
def test_disconnected_graph(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
self.assertEqual(list(G.nodes), node_list)
bfs.breadth_first_search(G, 'A', 1)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], -1)
self.assertEqual(G.nodes['C']['distance'], -1)
self.assertEqual(G.nodes['D']['distance'], -1)
self.assertEqual(G.nodes['E']['distance'], -1)
self.assertEqual(G.nodes['F']['distance'], -1)
def test_sequential(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 1)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
def test_parallel_2(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 2)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
def test_parallel_3(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 3)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
def test_parallel_4(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 4)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
def test_parallel_5(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 5)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
def test_parallel_6(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 6)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
def test_parallel_7(self):
G = nx.Graph()
node_list = ['A', 'B', 'C', 'D', 'E', 'F']
G.add_nodes_from(node_list)
edge_list = [('A','C'),('A', 'B'),('C','E'),('B', 'D'),('D','F')]
G.add_edges_from(edge_list)
bfs.breadth_first_search(G, 'A', 7)
self.assertEqual(G.nodes['A']['distance'], 0)
self.assertEqual(G.nodes['B']['distance'], 1)
self.assertEqual(G.nodes['C']['distance'], 1)
self.assertEqual(G.nodes['D']['distance'], 2)
self.assertEqual(G.nodes['E']['distance'], 2)
self.assertEqual(G.nodes['F']['distance'], 3)
|
[
"Medusa.graphs.bfs.breadth_first_search",
"networkx.Graph"
] |
[((177, 187), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (185, 187), True, 'import networkx as nx\n'), ((334, 369), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(1)'], {}), "(G, 'A', 1)\n", (358, 369), False, 'from Medusa.graphs import bfs\n'), ((743, 753), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (751, 753), True, 'import networkx as nx\n'), ((959, 994), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(1)'], {}), "(G, 'A', 1)\n", (983, 994), False, 'from Medusa.graphs import bfs\n'), ((1363, 1373), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1371, 1373), True, 'import networkx as nx\n'), ((1579, 1614), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(2)'], {}), "(G, 'A', 2)\n", (1603, 1614), False, 'from Medusa.graphs import bfs\n'), ((1983, 1993), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1991, 1993), True, 'import networkx as nx\n'), ((2199, 2234), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(3)'], {}), "(G, 'A', 3)\n", (2223, 2234), False, 'from Medusa.graphs import bfs\n'), ((2603, 2613), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2611, 2613), True, 'import networkx as nx\n'), ((2819, 2854), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(4)'], {}), "(G, 'A', 4)\n", (2843, 2854), False, 'from Medusa.graphs import bfs\n'), ((3223, 3233), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3231, 3233), True, 'import networkx as nx\n'), ((3439, 3474), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(5)'], {}), "(G, 'A', 5)\n", (3463, 3474), False, 'from Medusa.graphs import bfs\n'), ((3843, 3853), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3851, 3853), True, 'import networkx as nx\n'), ((4059, 4094), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(6)'], {}), "(G, 'A', 6)\n", (4083, 4094), False, 'from Medusa.graphs import bfs\n'), ((4463, 4473), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4471, 4473), True, 'import networkx as nx\n'), ((4679, 4714), 'Medusa.graphs.bfs.breadth_first_search', 'bfs.breadth_first_search', (['G', '"""A"""', '(7)'], {}), "(G, 'A', 7)\n", (4703, 4714), False, 'from Medusa.graphs import bfs\n')]
|
'''
Tenor class
@author: <NAME>
@copyright: BG Research LLC, 2011
@modified: July 2012 to replace SWIG Quantlib bindings with pyQL Cython code.
'''
from datetime import date
from pybg.enums import TimeUnits
from pybg.quantlib.time.api import *
from pybg.ql import pydate_from_qldate, qldate_from_pydate
class Tenor(object):
_tenorUnits = {'D': TimeUnits.Days,
'W': TimeUnits.Weeks,
'M': TimeUnits.Months,
'Y': TimeUnits.Years}
_tenorLength = {'D': 365,
'W': 52,
'M': 12,
'Y': 1} # useful for sorting
def __init__(self, txt):
firstNum = True
firstCh = True
numTxt = ""
unit="Y"
for i in str(txt).replace(' ', ''):
if i.isalnum():
if i.isdigit():
numTxt = numTxt + i
if firstNum:
firstNum = False
elif i.isalpha():
if firstCh and (i.upper() in self._tenorUnits):
unit = i.upper()
firstCh = False
else:
pass
if(firstNum):
numTxt="0"
self.length = int(numTxt)
self.unit = unit
self.timeunit = self._tenorUnits.get(self.unit, Days)
@classmethod
def fromdates(cls, settle, maturity, daycount=ActualActual()):
'''
Returns the tenor associated with settlement and maturity.
'''
settle = qldate_from_pydate(settle)
maturity = qldate_from_pydate(maturity)
years_ = daycount.year_fraction(settle, maturity)
if years_ >= 1.0:
t = "".join((str(int(round(years_))),"Y"))
else:
t = "".join((str(int(round(years_*12.))),"M"))
return cls(t)
def __str__(self):
return str(self.length)+self.unit
def __repr__(self):
return "<Tenor:"+self.__str__()+">"
def numberOfPeriods(self, frequency=Semiannual):
'''Returns the number of integer periods in the tenor
based on the given frequency.
'''
return int(self.term * int(frequency))
def advance(self,
date_,
convention=Unadjusted,
calendar=TARGET(),
reverse=False,
aspy=True):
date_ = qldate_from_pydate(date_)
length_ = self.length if not reverse else -self.length
date_ = calendar.advance(date_, length_, self.timeunit,
convention=convention)
return date_ if not aspy else pydate_from_qldate(date_)
def schedule(self, settle_, maturity_, convention=Unadjusted,
calendar=TARGET(),
aspy=True):
'''
tenor('3m').schedule(settleDate, maturityDate) or
tenor('3m').schedule(settleDate, '10Y')
gives a schedule of dates from settleDate to maturity with a
short front stub.
'''
settle_ = qldate_from_pydate(settle_)
mty_ = qldate_from_pydate(maturity_)
sched = []
if type(maturity_) == str and not mty_:
maturity_ = Tenor(maturity_).advance(settle_,
convention=convention,
calendar=calendar
)
else:
maturity_ = mty_
dt = maturity_
while dt.serial > settle_.serial:
sched.append(calendar.adjust(dt, convention))
dt = self.advance(dt, reverse=True)
else:
sched.append(settle_)
sched.sort(key=lambda dt: dt.serial)
if aspy:
sched = [pydate_from_qldate(dt) for dt in sched]
return sched
@property
def term(self):
'''
Length of tenor in years.
'''
return float(self.length) / float(self._tenorLength.get(self.unit, 1.0))
@property
def QLPeriod(self):
return Period(self.length, self.timeunit)
@property
def tuple(self):
return (self.length, self.timeunit)
|
[
"pybg.ql.pydate_from_qldate",
"pybg.ql.qldate_from_pydate"
] |
[((1632, 1658), 'pybg.ql.qldate_from_pydate', 'qldate_from_pydate', (['settle'], {}), '(settle)\n', (1650, 1658), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n'), ((1678, 1706), 'pybg.ql.qldate_from_pydate', 'qldate_from_pydate', (['maturity'], {}), '(maturity)\n', (1696, 1706), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n'), ((2553, 2578), 'pybg.ql.qldate_from_pydate', 'qldate_from_pydate', (['date_'], {}), '(date_)\n', (2571, 2578), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n'), ((3271, 3298), 'pybg.ql.qldate_from_pydate', 'qldate_from_pydate', (['settle_'], {}), '(settle_)\n', (3289, 3298), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n'), ((3314, 3343), 'pybg.ql.qldate_from_pydate', 'qldate_from_pydate', (['maturity_'], {}), '(maturity_)\n', (3332, 3343), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n'), ((2844, 2869), 'pybg.ql.pydate_from_qldate', 'pydate_from_qldate', (['date_'], {}), '(date_)\n', (2862, 2869), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n'), ((4062, 4084), 'pybg.ql.pydate_from_qldate', 'pydate_from_qldate', (['dt'], {}), '(dt)\n', (4080, 4084), False, 'from pybg.ql import pydate_from_qldate, qldate_from_pydate\n')]
|
import unittest
from spn.algorithms.EM import EM_optimization
from spn.algorithms.Inference import log_likelihood
from spn.algorithms.LearningWrappers import learn_parametric, learn_mspn
from spn.gpu.TensorFlow import spn_to_tf_graph, eval_tf, likelihood_loss, tf_graph_to_spn
from spn.structure.Base import Context
from spn.structure.StatisticalTypes import MetaType
import numpy as np
from spn.structure.leaves.parametric.Parametric import Gaussian
import tensorflow as tf
class TestEM(unittest.TestCase):
def test_optimization(self):
np.random.seed(17)
data = np.random.normal(10, 0.01, size=2000).tolist() + np.random.normal(30, 10, size=2000).tolist()
data = np.array(data).reshape((-1, 10))
data = data.astype(np.float32)
ds_context = Context(meta_types=[MetaType.REAL] * data.shape[1], parametric_types=[Gaussian] * data.shape[1])
spn = learn_parametric(data, ds_context)
spn.weights = [0.8, 0.2]
py_ll = log_likelihood(spn, data)
print(spn.weights)
EM_optimization(spn, data)
print(spn.weights)
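        # EM should not decrease the training log-likelihood, so py_ll_opt is expected to be >= py_ll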
py_ll_opt = log_likelihood(spn, data)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"spn.algorithms.LearningWrappers.learn_parametric",
"numpy.random.seed",
"spn.algorithms.EM.EM_optimization",
"spn.algorithms.Inference.log_likelihood",
"numpy.array",
"numpy.random.normal",
"spn.structure.Base.Context"
] |
[((1190, 1205), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1203, 1205), False, 'import unittest\n'), ((556, 574), 'numpy.random.seed', 'np.random.seed', (['(17)'], {}), '(17)\n', (570, 574), True, 'import numpy as np\n'), ((793, 894), 'spn.structure.Base.Context', 'Context', ([], {'meta_types': '([MetaType.REAL] * data.shape[1])', 'parametric_types': '([Gaussian] * data.shape[1])'}), '(meta_types=[MetaType.REAL] * data.shape[1], parametric_types=[\n Gaussian] * data.shape[1])\n', (800, 894), False, 'from spn.structure.Base import Context\n'), ((905, 939), 'spn.algorithms.LearningWrappers.learn_parametric', 'learn_parametric', (['data', 'ds_context'], {}), '(data, ds_context)\n', (921, 939), False, 'from spn.algorithms.LearningWrappers import learn_parametric, learn_mspn\n'), ((991, 1016), 'spn.algorithms.Inference.log_likelihood', 'log_likelihood', (['spn', 'data'], {}), '(spn, data)\n', (1005, 1016), False, 'from spn.algorithms.Inference import log_likelihood\n'), ((1054, 1080), 'spn.algorithms.EM.EM_optimization', 'EM_optimization', (['spn', 'data'], {}), '(spn, data)\n', (1069, 1080), False, 'from spn.algorithms.EM import EM_optimization\n'), ((1130, 1155), 'spn.algorithms.Inference.log_likelihood', 'log_likelihood', (['spn', 'data'], {}), '(spn, data)\n', (1144, 1155), False, 'from spn.algorithms.Inference import log_likelihood\n'), ((699, 713), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (707, 713), True, 'import numpy as np\n'), ((590, 627), 'numpy.random.normal', 'np.random.normal', (['(10)', '(0.01)'], {'size': '(2000)'}), '(10, 0.01, size=2000)\n', (606, 627), True, 'import numpy as np\n'), ((639, 674), 'numpy.random.normal', 'np.random.normal', (['(30)', '(10)'], {'size': '(2000)'}), '(30, 10, size=2000)\n', (655, 674), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import subprocess
try:
subprocess.call(["pyclean", ".."])
except:
print("error")
else:
print("*.pyc borrados")
|
[
"subprocess.call"
] |
[((50, 84), 'subprocess.call', 'subprocess.call', (["['pyclean', '..']"], {}), "(['pyclean', '..'])\n", (65, 84), False, 'import subprocess\n')]
|
"""MobileAlerts internet gataway."""
from typing import Any, Awaitable, Callable, Dict, List, Optional
import asyncio
import logging
import socket
import struct
import time
from ipaddress import IPv4Address
import aiohttp
from multidict import CIMultiDictProxy
from yarl import URL
from .sensor import Sensor
_LOGGER = logging.getLogger(__name__)
SensorHandler = Callable[[Sensor], Awaitable[None]]
#: all communication with the gateways are broadcasts
BROADCAST_ADDR = "255.255.255.255"
#: UDP port used by the gateway for communications
PORT = 8003
# Commands which the gateway accepts via UDP:
DISCOVER_GATEWAYS = 1
#: Find any available gateway in the local network
FIND_GATEWAY = 2
#: Find a single available gateway in the local network
GET_CONFIG = 3
#: Request the configuration of the gateway
SET_CONFIG = 4
#: Set a new configuration. Gateway takes a few seconds to do the update
REBOOT = 5
#: A reboot takes about 10s for the gateway to be back up again
ORIG_PROXY_BYTE1 = 0x19
#: 'Magic' byte #1 to mark preserved original proxy settings
ORIG_PROXY_BYTE2 = 0x74
#: 'Magic' byte #2 to mark preserved original proxy settings
class Gateway:
"""Controls MobileAlerts internet gataway."""
def __init__(
self,
gateway_id: str,
local_ip_address: Optional[str] = None,
) -> None:
self._id: bytes = bytes.fromhex(gateway_id)
self._local_ip_address: Optional[str] = local_ip_address
self._handler: Optional[SensorHandler] = None
self._version = "1.50"
self._last_seen: Optional[float] = None
self._attached = False
self._orig_use_proxy: Any = None
self._orig_proxy: Any = None
self._orig_proxy_port: Any = None
self._dhcp_ip: Any = None
self._use_dhcp: Any = None
self._fixed_ip: Any = None
self._fixed_netmask: Any = None
self._fixed_gateway: Any = None
self._name: Any = None
self._server: Any = None
self._use_proxy: Any = None
self._proxy: Any = None
self._proxy_port: Any = None
self._fixed_dns: Any = None
self._send_data_to_cloud = True
self._sensors: Dict[str, Sensor] = dict()
self._initialized = False
async def init(
self,
config: Optional[bytes] = None,
) -> None:
if config is None:
config = await self.get_config()
if config is not None:
self.parse_config(config)
def _check_init(self) -> None:
if not self._initialized:
raise Exception("Gateway is not initialized")
@staticmethod
def prepare_socket(
timeout: int,
local_ip_address: Optional[str],
) -> socket.socket:
"""Prepares UDP socket to comunicate with the gateway."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
sock.settimeout(timeout)
if local_ip_address:
sock.bind((local_ip_address, 0))
else:
sock.bind(("", 0))
return sock
@staticmethod
def prepare_command(command: int, gateway_id: bytes) -> bytes:
"""Prepares command UDP packet to send."""
packet = struct.pack(">H6sH", command, gateway_id, 10)
return packet
async def send_command(
self, command: int, wait_for_result: bool = False, timeout: int = 2
) -> Optional[bytes]:
"""Sends command and optional data to the gateway."""
packet = self.prepare_command(command, self._id)
sock = self.prepare_socket(timeout, self._local_ip_address)
try:
sock.sendto(packet, (BROADCAST_ADDR, PORT))
if wait_for_result:
loop = asyncio.get_event_loop()
config = await asyncio.wait_for(loop.sock_recv(sock, 256), timeout)
self._last_seen = time.time()
return config
else:
return None
finally:
sock.close()
async def get_config(self, timeout: int = 2) -> Optional[bytes]:
"""Obtains configuration from the gateway."""
return await self.send_command(FIND_GATEWAY, True, timeout)
@staticmethod
def check_config(config: bytes) -> bool:
return (
config is not None
and (len(config) >= 186)
and (len(config) == int.from_bytes(config[8:10], "big"))
)
def parse_config(self, config: bytes) -> bool:
"""Parses configuration obtained from the gateway."""
result = self.check_config(config) and (
(self._id is None) or (self._id == config[2:8])
)
if result:
orig_data = bytearray()
self._id = config[2:8]
self._dhcp_ip = IPv4Address(config[11:15])
self._use_dhcp = config[15] != 0
self._fixed_ip = IPv4Address(config[16:20])
self._fixed_netmask = IPv4Address(config[20:24])
self._fixed_gateway = IPv4Address(config[24:28])
self._name = config[28 : config.find(0, 28, 49)].decode("utf-8")
str_end_pos = config.find(0, 49, 114)
if (
config[str_end_pos + 1] == ORIG_PROXY_BYTE1
and config[str_end_pos + 2] == ORIG_PROXY_BYTE2
):
orig_data.extend(config[str_end_pos + 3 : 114])
self._server = config[49:str_end_pos].decode("utf-8")
self._use_proxy = config[114] != 0
str_end_pos = config.find(0, 115, 180)
self._proxy = config[115:str_end_pos].decode("utf-8")
if (
config[str_end_pos + 1] == ORIG_PROXY_BYTE1
and config[str_end_pos + 2] == ORIG_PROXY_BYTE2
):
orig_data.extend(config[str_end_pos + 3 : 180])
self._proxy_port = int.from_bytes(config[180:182], "big")
self._fixed_dns = IPv4Address(config[182:186])
if len(orig_data) > 3:
self._orig_use_proxy = orig_data[0]
self._orig_proxy_port = int.from_bytes(orig_data[1:3], "big")
str_end_pos = orig_data.find(0, 3)
self._orig_proxy = orig_data[3:str_end_pos].decode("utf-8")
self._last_seen = time.time()
self._initialized = True
return result
async def update_config(self, timeout: int = 2) -> bool:
"""Updates configuration from the gateway."""
config = await self.get_config(timeout)
if config is not None:
return self.parse_config(config)
else:
return False
def set_config(self) -> None:
"""Set configuration to the gateway."""
self._check_init()
command = SET_CONFIG
if self._orig_use_proxy is not None:
orig_name_bytes = bytes(self._orig_proxy, "utf-8")
orig_data_size = 3 + len(orig_name_bytes)
else:
orig_data_size = 0
orig_data = bytearray(orig_data_size)
if orig_data_size > 0:
orig_data[0] = self._orig_use_proxy
orig_data[1:3] = self._orig_proxy_port.to_bytes(2, "big")
orig_data[3:orig_data_size] = orig_name_bytes
orig_data_pos = 0
packet_size = 181
packet = bytearray(packet_size)
packet[0:2] = command.to_bytes(2, "big")
packet[2:8] = self._id
packet[8:10] = packet_size.to_bytes(2, "big")
packet[10] = self._use_dhcp
packet[11:15] = self._fixed_ip.packed
packet[15:19] = self._fixed_netmask.packed
packet[19:23] = self._fixed_gateway.packed
str_bytes = bytes(self._name, "utf-8")
packet[23 : 23 + len(str_bytes)] = str_bytes
str_bytes = bytes(21 - len(str_bytes))
packet[44 - len(str_bytes) : 44] = str_bytes
str_bytes = bytes(self._server, "utf-8")
packet[44 : 44 + len(str_bytes)] = str_bytes
str_bytes = bytearray(65 - len(str_bytes))
if orig_data_pos < orig_data_size:
str_bytes[1] = ORIG_PROXY_BYTE1
str_bytes[2] = ORIG_PROXY_BYTE2
orig_part_size = min(orig_data_size - orig_data_pos, len(str_bytes) - 3)
str_bytes[3 : 3 + orig_part_size] = orig_data[
orig_data_pos : orig_data_pos + orig_part_size
]
orig_data_pos += orig_part_size
packet[109 - len(str_bytes) : 109] = str_bytes
packet[109] = self._use_proxy
str_bytes = bytes(str(self._proxy), "utf-8")
packet[110 : 110 + len(str_bytes)] = str_bytes
str_bytes = bytearray(65 - len(str_bytes))
if orig_data_pos < orig_data_size:
str_bytes[1] = ORIG_PROXY_BYTE1
str_bytes[2] = ORIG_PROXY_BYTE2
orig_part_size = min(orig_data_size - orig_data_pos, len(str_bytes) - 3)
str_bytes[3 : 3 + orig_part_size] = orig_data[
orig_data_pos : orig_data_pos + orig_part_size
]
packet[175 - len(str_bytes) : 175] = str_bytes
packet[175:177] = self._proxy_port.to_bytes(2, "big")
packet[177:181] = self._fixed_dns.packed
sock = Gateway.prepare_socket(1, self._local_ip_address)
try:
sock.sendto(packet, (BROADCAST_ADDR, PORT))
finally:
sock.close()
def reset_config(self) -> None:
"""Reset configuration of the gateway to default values."""
self.name = "MOBILEALERTS-Gateway"
self.use_dhcp = True
self.fixed_ip = "192.168.1.222"
self.fixed_netmask = "255.255.255.0"
self.fixed_gateway = "192.168.1.254"
self.fixed_dns = "192.168.1.253"
self.server = "www.data199.com"
self.use_proxy = False
self.proxy = "192.168.1.1"
self.proxy_port = 8080
self.set_config()
async def reboot(self, update_config: bool, timeout: int = 30) -> None:
"""Reboots the gateway and optional update configuration."""
config = await self.send_command(REBOOT, update_config, timeout)
if update_config and config is not None:
self.parse_config(config)
@staticmethod
async def discover(
local_ip_address: Optional[str] = None,
timeout: int = 2,
) -> List["Gateway"]:
"""Broadcasts discover packet and yeld gateway objects created from resposes."""
result = []
discovered = []
loop = asyncio.get_event_loop()
sock = Gateway.prepare_socket(timeout, local_ip_address)
packet = Gateway.prepare_command(DISCOVER_GATEWAYS, bytearray(6))
try:
sock.sendto(packet, (BROADCAST_ADDR, PORT))
while True:
try:
config = await asyncio.wait_for(loop.sock_recv(sock, 256), timeout)
except socket.timeout:
break
except asyncio.TimeoutError:
break
if Gateway.check_config(config):
gateway_id = config[2:8]
if gateway_id in discovered:
continue
discovered.append(gateway_id)
gateway = Gateway(gateway_id.hex().upper(), local_ip_address)
await gateway.init(config)
result.append(gateway)
finally:
sock.close()
return result
def set_handler(
self,
handler: Optional[SensorHandler],
) -> None:
self._handler = handler
def attach_to_proxy(
self,
proxy: str,
proxy_port: int,
handler: SensorHandler,
) -> None:
"""Attachs the gateway to the proxy to read measuremnts.
Existing proxy settings will be preserved
"""
if self._orig_use_proxy is None:
self._orig_use_proxy = self._use_proxy
self._orig_proxy = self._proxy
self._orig_proxy_port = self._proxy_port
self._attached = True
self._use_proxy = True
self._proxy = IPv4Address(proxy)
self._proxy_port = proxy_port
self.set_handler(handler)
self.set_config()
# await self.get_config()
def detach_from_proxy(self) -> None:
"""Detachs the gateway from the proxy and restore original settings."""
if self._attached:
self._use_proxy = self._orig_use_proxy
self._proxy = self._orig_proxy
self._proxy_port = self._orig_proxy_port
self._attached = False
self._orig_use_proxy = None
self._orig_proxy = None
self._orig_proxy_port = None
self.set_handler(None)
self.set_config()
def handle_bootup_update(self, package: bytes) -> None:
"""Handle gateway's bootup update packet."""
if (len(package) == 15) and (package[5:11] == self._id):
_LOGGER.debug(
"Gateway bootup timestamp %s",
time.ctime(int.from_bytes(package[1:5], "big")),
)
self._version = (
str(int.from_bytes(package[11:13], "big"))
+ "."
+ str(int.from_bytes(package[13:15], "big"))
)
self._last_seen = time.time()
def add_sensor(self, sensor: Sensor) -> None:
"""Add sensor object."""
self._sensors[sensor.sensor_id] = sensor
def create_sensor(self, sensor_id: str) -> Sensor:
"""Create new sensor object for given ID."""
result = Sensor(self, sensor_id)
self.add_sensor(result)
return result
def get_sensor(self, sensor_id: str) -> Sensor:
"""Return sensor object for given ID, creates the sensor if not exists."""
result = self._sensors.get(sensor_id, None)
if not result:
result = self.create_sensor(sensor_id)
return result
async def handle_sensor_update(self, package: bytes, package_checksum: int) -> None:
"""Handle update packet for one sensor."""
_LOGGER.debug(
"Update package %s, checksum %s",
package.hex().upper(),
hex(package_checksum),
)
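        # checksum is the sum of all payload bytes, truncated to 7 bits (masked with 0x7F)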
checksum = 0
for b in package:
checksum += b
checksum &= 0x7F
if checksum == package_checksum:
self._last_seen = time.time()
sensor_id = package[6:12].hex().upper()
sensor = self.get_sensor(sensor_id)
sensor.parse_packet(package)
if self._handler:
await self._handler(sensor)
async def handle_sensors_update(self, packages: bytes) -> None:
"""Handle update packet for few sensors."""
pos = 0
packages_len = len(packages)
while pos + 64 <= packages_len:
await self.handle_sensor_update(
packages[pos : pos + 63], packages[pos + 63]
)
pos += 64
async def handle_update(self, code: str, packages: bytes) -> None:
"""Handle update packets."""
if code == "00":
self.handle_bootup_update(packages)
elif code == "C0":
await self.handle_sensors_update(packages)
else:
_LOGGER.error(
"Unknnow update code %d, data %s",
code,
packages.hex().upper(),
)
async def resend_data_to_cloud(
self,
url: URL,
headers: CIMultiDictProxy[str],
content: bytes,
) -> None:
"""Resend gateway's PUT request to cloud server."""
if self._send_data_to_cloud:
try:
async with aiohttp.ClientSession() as session:
async with session.put(
str(url), headers=headers, data=content
) as response:
response_content = await response.content.read()
_LOGGER.debug(
"Cloud response status: %s content: %s",
response.status,
response_content.hex().upper(),
)
except Exception as e:
_LOGGER.error("Error resending request to cloud: %r", e)
@property
def gateway_id(self) -> str:
return self._id.hex().upper()
@property
def serial(self) -> str:
return "80" + self._id[3:6].hex().upper()
@property
def version(self) -> str:
return self._version
@property
def last_seen(self) -> Optional[float]:
return self._last_seen
@property
def attached(self) -> bool:
return self._attached
@property
def send_data_to_cloud(self) -> bool:
return self._send_data_to_cloud
@send_data_to_cloud.setter
def send_data_to_cloud(self, value: bool) -> None:
self._send_data_to_cloud = value
@property
def dhcp_ip(self) -> str:
return str(self._dhcp_ip)
@property
def use_dhcp(self) -> bool:
return bool(self._use_dhcp)
@use_dhcp.setter
def use_dhcp(self, value: bool) -> None:
self._use_dhcp = value
@property
def fixed_ip(self) -> str:
return str(self._fixed_ip)
@fixed_ip.setter
def fixed_ip(self, value: str) -> None:
self._fixed_ip = IPv4Address(value)
@property
def fixed_netmask(self) -> str:
return str(self._fixed_netmask)
@fixed_netmask.setter
def fixed_netmask(self, value: str) -> None:
self._fixed_netmask = IPv4Address(value)
@property
def fixed_gateway(self) -> str:
return str(self._fixed_gateway)
@fixed_gateway.setter
def fixed_gateway(self, value: str) -> None:
self._fixed_gateway = IPv4Address(value)
@property
def name(self) -> str:
return str(self._name)
@name.setter
def name(self, value: str) -> None:
if len(bytes(value, "utf-8")) > 20:
raise ValueError("Name is too long")
self._name = value
@property
def server(self) -> str:
return str(self._server)
@server.setter
def server(self, value: str) -> None:
if len(bytes(value, "utf-8")) > 64:
raise ValueError("Server address is too long")
self._server = value
@property
def use_proxy(self) -> bool:
return bool(self._use_proxy)
@use_proxy.setter
def use_proxy(self, value: bool) -> None:
self._use_proxy = value
@property
def proxy(self) -> str:
return str(self._proxy)
@proxy.setter
def proxy(self, value: str) -> None:
if len(bytes(value, "utf-8")) > 64:
raise ValueError("Proxy server address is too long")
self._proxy = value
@property
def proxy_port(self) -> int:
return int(self._proxy_port)
@proxy_port.setter
def proxy_port(self, value: int) -> None:
if value < 0 or value >= 64 * 1024:
raise ValueError("Invalid proxy port number")
self._proxy_port = value
@property
def fixed_dns(self) -> str:
return str(self._fixed_dns)
@fixed_dns.setter
def fixed_dns(self, value: str) -> None:
self._fixed_dns = IPv4Address(value)
@property
def orig_use_proxy(self) -> bool:
return bool(self._orig_use_proxy)
@property
def orig_proxy(self) -> str:
return str(self._orig_proxy)
@property
def orig_proxy_port(self) -> int:
return int(self._orig_proxy_port)
def __repr__(self) -> str:
"""Return a formal representation of the gateway."""
return (
"%s.%s(%s(%s), "
"gateway_id=%s, "
"version=%r, "
"last_seen=%r, "
"attached=%r, "
"send_data_to_cloud=%r, "
"dhcp_ip=%r, "
"use_dhcp=%r, "
"fixed_ip=%r, "
"fixed_netmask=%r, "
"fixed_gateway=%r, "
"fixed_dns=%r, "
"server=%r, "
"use_proxy=%r, "
"proxy=%r, "
"proxy_port=%r, "
"orig_use_proxy=%r, "
"orig_proxy=%r, "
"orig_proxy_port=%r"
")"
) % (
self.__class__.__module__,
self.__class__.__qualname__,
self.name,
self.serial,
self.gateway_id,
self.version,
time.ctime(self.last_seen) if self.last_seen is not None else "never",
self.attached,
self.send_data_to_cloud,
self.dhcp_ip,
self.use_dhcp,
self.fixed_ip,
self.fixed_netmask,
self.fixed_gateway,
self.fixed_dns,
self.server,
self.use_proxy,
self.proxy,
self.proxy_port,
self.orig_use_proxy,
self.orig_proxy,
self.orig_proxy_port,
)
def __str__(self) -> str:
"""Return a readable representation of the gateway."""
return (
"%s V%s, SerialNo: %s (id: %s)\n"
"Use DHCP: %s\n"
"DHCP IP: %s\n"
"Fixed IP: %s\n"
"Fixed Netmask: %s\n"
"Fixed Gateway: %s\n"
"Fixed DNS: %s\n"
"Cloud Server: %s\n"
"Use Proxy: %s\n"
"Proxy Server: %s\n"
"Proxy Port: %s\n"
"Send data to cloud: %s\n"
"Last Contact: %s"
) % (
self.name,
self.version,
self.serial,
self.gateway_id,
"Yes" if self.use_dhcp else "No",
self.dhcp_ip,
self.fixed_ip,
self.fixed_netmask,
self.fixed_gateway,
self.fixed_dns,
self.server,
"Yes" if self.use_proxy else "No",
self.proxy,
self.proxy_port,
"Yes" if self.send_data_to_cloud else "No",
time.ctime(self.last_seen) if self.last_seen is not None else "never",
)
|
[
"asyncio.get_event_loop",
"socket.socket",
"time.ctime",
"struct.pack",
"time.time",
"aiohttp.ClientSession",
"ipaddress.IPv4Address",
"logging.getLogger"
] |
[((324, 351), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (341, 351), False, 'import logging\n'), ((2812, 2860), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2825, 2860), False, 'import socket\n'), ((3353, 3398), 'struct.pack', 'struct.pack', (['""">H6sH"""', 'command', 'gateway_id', '(10)'], {}), "('>H6sH', command, gateway_id, 10)\n", (3364, 3398), False, 'import struct\n'), ((10569, 10593), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10591, 10593), False, 'import asyncio\n'), ((12194, 12212), 'ipaddress.IPv4Address', 'IPv4Address', (['proxy'], {}), '(proxy)\n', (12205, 12212), False, 'from ipaddress import IPv4Address\n'), ((17457, 17475), 'ipaddress.IPv4Address', 'IPv4Address', (['value'], {}), '(value)\n', (17468, 17475), False, 'from ipaddress import IPv4Address\n'), ((17673, 17691), 'ipaddress.IPv4Address', 'IPv4Address', (['value'], {}), '(value)\n', (17684, 17691), False, 'from ipaddress import IPv4Address\n'), ((17889, 17907), 'ipaddress.IPv4Address', 'IPv4Address', (['value'], {}), '(value)\n', (17900, 17907), False, 'from ipaddress import IPv4Address\n'), ((19355, 19373), 'ipaddress.IPv4Address', 'IPv4Address', (['value'], {}), '(value)\n', (19366, 19373), False, 'from ipaddress import IPv4Address\n'), ((4908, 4934), 'ipaddress.IPv4Address', 'IPv4Address', (['config[11:15]'], {}), '(config[11:15])\n', (4919, 4934), False, 'from ipaddress import IPv4Address\n'), ((5009, 5035), 'ipaddress.IPv4Address', 'IPv4Address', (['config[16:20]'], {}), '(config[16:20])\n', (5020, 5035), False, 'from ipaddress import IPv4Address\n'), ((5070, 5096), 'ipaddress.IPv4Address', 'IPv4Address', (['config[20:24]'], {}), '(config[20:24])\n', (5081, 5096), False, 'from ipaddress import IPv4Address\n'), ((5131, 5157), 'ipaddress.IPv4Address', 'IPv4Address', (['config[24:28]'], {}), '(config[24:28])\n', (5142, 5157), False, 'from ipaddress import IPv4Address\n'), ((6055, 6083), 'ipaddress.IPv4Address', 'IPv4Address', (['config[182:186]'], {}), '(config[182:186])\n', (6066, 6083), False, 'from ipaddress import IPv4Address\n'), ((6406, 6417), 'time.time', 'time.time', ([], {}), '()\n', (6415, 6417), False, 'import time\n'), ((13382, 13393), 'time.time', 'time.time', ([], {}), '()\n', (13391, 13393), False, 'import time\n'), ((14476, 14487), 'time.time', 'time.time', ([], {}), '()\n', (14485, 14487), False, 'import time\n'), ((3864, 3888), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3886, 3888), False, 'import asyncio\n'), ((4007, 4018), 'time.time', 'time.time', ([], {}), '()\n', (4016, 4018), False, 'import time\n'), ((15780, 15803), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (15801, 15803), False, 'import aiohttp\n'), ((20550, 20576), 'time.ctime', 'time.ctime', (['self.last_seen'], {}), '(self.last_seen)\n', (20560, 20576), False, 'import time\n'), ((22108, 22134), 'time.ctime', 'time.ctime', (['self.last_seen'], {}), '(self.last_seen)\n', (22118, 22134), False, 'import time\n')]
|
import os
import pytest
import torch
import torchvision
from flower_classifier.datasets.csv import CSVDataset
from flower_classifier.datasets.oxford_flowers import OxfordFlowers102Dataset, OxfordFlowersDataModule, split_dataset
from flower_classifier.datasets.random import RandomDataModule
from tests.datasets import TEST_CACHE_DIR
@pytest.fixture(scope="module")
def oxford_dataset() -> torch.utils.data.Dataset:
transforms = [
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
dataset = OxfordFlowers102Dataset(root_dir=TEST_CACHE_DIR, download=True, transforms=transforms)
return dataset
@pytest.fixture(scope="module")
def oxford_dataloader(oxford_dataset):
dataloader = torch.utils.data.DataLoader(oxford_dataset, batch_size=8, shuffle=False)
return dataloader
@pytest.fixture(scope="module")
def oxford_datamodule():
transforms = [
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
data_module = OxfordFlowersDataModule(data_dir=TEST_CACHE_DIR, batch_size=32, train_transforms=transforms)
return data_module
@pytest.fixture(scope="module")
def oxford_csv_dataset() -> torch.utils.data.Dataset:
split_dataset(root_dir=TEST_CACHE_DIR, target_dir=TEST_CACHE_DIR)
train_filename = os.path.join(TEST_CACHE_DIR, "train_split.csv")
transforms = [
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
dataset = CSVDataset(filename=train_filename, transforms=transforms)
return dataset
@pytest.fixture(scope="module")
def oxford_csv_dataloader(oxford_csv_dataset):
dataloader = torch.utils.data.DataLoader(oxford_csv_dataset, batch_size=8, shuffle=False)
return dataloader
@pytest.fixture(scope="module")
def random_datamodule():
data_module = RandomDataModule(batch_size=32)
return data_module
|
[
"os.path.join",
"torch.utils.data.DataLoader",
"flower_classifier.datasets.csv.CSVDataset",
"torchvision.transforms.Normalize",
"pytest.fixture",
"flower_classifier.datasets.random.RandomDataModule",
"flower_classifier.datasets.oxford_flowers.OxfordFlowers102Dataset",
"flower_classifier.datasets.oxford_flowers.split_dataset",
"torchvision.transforms.RandomResizedCrop",
"flower_classifier.datasets.oxford_flowers.OxfordFlowersDataModule",
"torchvision.transforms.ToTensor"
] |
[((338, 368), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (352, 368), False, 'import pytest\n'), ((753, 783), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (767, 783), False, 'import pytest\n'), ((938, 968), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (952, 968), False, 'import pytest\n'), ((1342, 1372), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1356, 1372), False, 'import pytest\n'), ((1872, 1902), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1886, 1902), False, 'import pytest\n'), ((2069, 2099), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2083, 2099), False, 'import pytest\n'), ((644, 735), 'flower_classifier.datasets.oxford_flowers.OxfordFlowers102Dataset', 'OxfordFlowers102Dataset', ([], {'root_dir': 'TEST_CACHE_DIR', 'download': '(True)', 'transforms': 'transforms'}), '(root_dir=TEST_CACHE_DIR, download=True, transforms=\n transforms)\n', (667, 735), False, 'from flower_classifier.datasets.oxford_flowers import OxfordFlowers102Dataset, OxfordFlowersDataModule, split_dataset\n'), ((840, 912), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['oxford_dataset'], {'batch_size': '(8)', 'shuffle': '(False)'}), '(oxford_dataset, batch_size=8, shuffle=False)\n', (867, 912), False, 'import torch\n'), ((1223, 1319), 'flower_classifier.datasets.oxford_flowers.OxfordFlowersDataModule', 'OxfordFlowersDataModule', ([], {'data_dir': 'TEST_CACHE_DIR', 'batch_size': '(32)', 'train_transforms': 'transforms'}), '(data_dir=TEST_CACHE_DIR, batch_size=32,\n train_transforms=transforms)\n', (1246, 1319), False, 'from flower_classifier.datasets.oxford_flowers import OxfordFlowers102Dataset, OxfordFlowersDataModule, split_dataset\n'), ((1431, 1496), 'flower_classifier.datasets.oxford_flowers.split_dataset', 'split_dataset', ([], {'root_dir': 'TEST_CACHE_DIR', 'target_dir': 'TEST_CACHE_DIR'}), '(root_dir=TEST_CACHE_DIR, target_dir=TEST_CACHE_DIR)\n', (1444, 1496), False, 'from flower_classifier.datasets.oxford_flowers import OxfordFlowers102Dataset, OxfordFlowersDataModule, split_dataset\n'), ((1518, 1565), 'os.path.join', 'os.path.join', (['TEST_CACHE_DIR', '"""train_split.csv"""'], {}), "(TEST_CACHE_DIR, 'train_split.csv')\n", (1530, 1565), False, 'import os\n'), ((1791, 1849), 'flower_classifier.datasets.csv.CSVDataset', 'CSVDataset', ([], {'filename': 'train_filename', 'transforms': 'transforms'}), '(filename=train_filename, transforms=transforms)\n', (1801, 1849), False, 'from flower_classifier.datasets.csv import CSVDataset\n'), ((1967, 2043), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['oxford_csv_dataset'], {'batch_size': '(8)', 'shuffle': '(False)'}), '(oxford_csv_dataset, batch_size=8, shuffle=False)\n', (1994, 2043), False, 'import torch\n'), ((2143, 2174), 'flower_classifier.datasets.random.RandomDataModule', 'RandomDataModule', ([], {'batch_size': '(32)'}), '(batch_size=32)\n', (2159, 2174), False, 'from flower_classifier.datasets.random import RandomDataModule\n'), ((446, 491), 'torchvision.transforms.RandomResizedCrop', 'torchvision.transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (486, 491), False, 'import torchvision\n'), ((501, 534), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (532, 534), False, 'import torchvision\n'), ((544, 622), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (576, 622), False, 'import torchvision\n'), ((1021, 1066), 'torchvision.transforms.RandomResizedCrop', 'torchvision.transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1061, 1066), False, 'import torchvision\n'), ((1076, 1109), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1107, 1109), False, 'import torchvision\n'), ((1119, 1197), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1151, 1197), False, 'import torchvision\n'), ((1593, 1638), 'torchvision.transforms.RandomResizedCrop', 'torchvision.transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1633, 1638), False, 'import torchvision\n'), ((1648, 1681), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1679, 1681), False, 'import torchvision\n'), ((1691, 1769), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1723, 1769), False, 'import torchvision\n')]
|
"""
Objective
In this challenge, we learn about Poisson distributions.
Task
A random variable, X, follows a Poisson distribution with a mean of 2.5.
Find the probability with which the random variable X is equal to 5.
"""
from math import exp, factorial
def poisson(lam=2.5, k=5):
"""
    Return the probability of X=k under a Poisson distribution with mean lam
"""
return lam**k*exp(-lam)/factorial(k)
print(round(poisson(), 3))
|
[
"math.exp",
"math.factorial"
] |
[((398, 410), 'math.factorial', 'factorial', (['k'], {}), '(k)\n', (407, 410), False, 'from math import exp, factorial\n'), ((388, 397), 'math.exp', 'exp', (['(-lam)'], {}), '(-lam)\n', (391, 397), False, 'from math import exp, factorial\n')]
|
"""Common utils for parsing and handling InferenceServices."""
import os
from kubeflow.kubeflow.crud_backend import api, helpers, logging
log = logging.getLogger(__name__)
KNATIVE_REVISION_LABEL = "serving.knative.dev/revision"
FILE_ABS_PATH = os.path.abspath(os.path.dirname(__file__))
INFERENCESERVICE_TEMPLATE_YAML = os.path.join(
FILE_ABS_PATH, "yaml", "inference_service_template.yaml")
def load_inference_service_template(**kwargs):
"""
Return an InferenceService dict, with defaults from the local yaml.
Reads the yaml for the web app's custom resource, replaces the variables
and returns it as a python dict.
kwargs: the parameters to be replaced in the yaml
"""
return helpers.load_param_yaml(INFERENCESERVICE_TEMPLATE_YAML, **kwargs)
# helper functions for accessing the logs of an InferenceService
def get_inference_service_pods(svc, components=[]):
"""
Return the Pod names for the different isvc components.
Return a dictionary with (endpoint, component) keys,
i.e. ("default", "predictor") and a list of pod names as values
"""
namespace = svc["metadata"]["namespace"]
# dictionary{revisionName: (endpoint, component)}
revisions_dict = get_components_revisions_dict(components, svc)
if len(revisions_dict.keys()) == 0:
return {}
pods = api.list_pods(namespace, auth=False).items
component_pods_dict = {}
for pod in pods:
for revision in revisions_dict:
if KNATIVE_REVISION_LABEL not in pod.metadata.labels:
continue
if pod.metadata.labels[KNATIVE_REVISION_LABEL] != revision:
continue
component = revisions_dict[revision]
curr_pod_names = component_pods_dict.get(component, [])
curr_pod_names.append(pod.metadata.name)
component_pods_dict[component] = curr_pod_names
if len(component_pods_dict.keys()) == 0:
log.info("No pods are found for inference service: %s",
svc["metadata"]["name"])
return component_pods_dict
# FIXME(elikatsis,kimwnasptd): Change the logic of this function according to
# https://github.com/arrikto/dev/issues/867
def get_components_revisions_dict(components, svc):
"""Return a dictionary{revisionId: component}."""
status = svc["status"]
revisions_dict = {}
for component in components:
if "components" not in status:
log.info("Component '%s' not in inference service '%s'",
component, svc["metadata"]["name"])
continue
if component not in status["components"]:
log.info("Component '%s' not in inference service '%s'",
component, svc["metadata"]["name"])
continue
if "latestReadyRevision" in status["components"][component]:
revision = status["components"][component]["latestReadyRevision"]
revisions_dict[revision] = component
if len(revisions_dict.keys()) == 0:
log.info(
"No revisions found for the inference service's components: %s",
svc["metadata"]["name"],
)
return revisions_dict
|
[
"kubeflow.kubeflow.crud_backend.logging.getLogger",
"os.path.dirname",
"kubeflow.kubeflow.crud_backend.api.list_pods",
"os.path.join",
"kubeflow.kubeflow.crud_backend.helpers.load_param_yaml"
] |
[((146, 173), 'kubeflow.kubeflow.crud_backend.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (163, 173), False, 'from kubeflow.kubeflow.crud_backend import api, helpers, logging\n'), ((324, 394), 'os.path.join', 'os.path.join', (['FILE_ABS_PATH', '"""yaml"""', '"""inference_service_template.yaml"""'], {}), "(FILE_ABS_PATH, 'yaml', 'inference_service_template.yaml')\n", (336, 394), False, 'import os\n'), ((263, 288), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (278, 288), False, 'import os\n'), ((718, 783), 'kubeflow.kubeflow.crud_backend.helpers.load_param_yaml', 'helpers.load_param_yaml', (['INFERENCESERVICE_TEMPLATE_YAML'], {}), '(INFERENCESERVICE_TEMPLATE_YAML, **kwargs)\n', (741, 783), False, 'from kubeflow.kubeflow.crud_backend import api, helpers, logging\n'), ((1344, 1380), 'kubeflow.kubeflow.crud_backend.api.list_pods', 'api.list_pods', (['namespace'], {'auth': '(False)'}), '(namespace, auth=False)\n', (1357, 1380), False, 'from kubeflow.kubeflow.crud_backend import api, helpers, logging\n')]
|
from note import Note
from majorScale import MajorScale
from minorScale import MinorScale
print("Hi welcome to my app.\n")
note = None
scale = None
while(True):
# if no scale is chosen
if scale is None:
# choose a note
if note is None:
note = input("Choose a note: ")
menu = ("1. {0} major scale\n"
"2. {0} minor scale\n"
"3. Choose another note\n"
"4. Exit").format(note)
print(menu)
# choose major or minor
optn = input("\nChoose an option: ")
if optn == "1":
scale = MajorScale(note)
elif optn == "2":
scale = MinorScale(note)
elif optn == "3":
note = None
elif optn == "4":
break
else:
print("Invalid option. Try again.\n")
# major scale
if isinstance(scale, MajorScale):
print(scale)
menu = ("1. Get parallel minor\n"
"2. Get relative minor\n"
"3. Choose another note\n"
"4. Exit").format(note)
print(menu)
optn = input("\nChoose an option: ")
if optn == "1":
scale = MinorScale(scale,1)
elif optn == "2":
scale = MinorScale(scale,2)
elif optn == "3":
note = None
scale = None
elif optn == "4":
break
else:
print("Invalid option. Try again.\n")
# minor scale
if isinstance(scale, MinorScale):
print(scale)
menu = ("1. Get parallel major\n"
"2. Get relative major\n"
"3. Choose another note\n"
"4. Exit").format(note)
print(menu)
optn = input("\nChoose an option: ")
if optn == "1":
scale = MajorScale(scale,1)
elif optn == "2":
scale = MajorScale(scale,2)
elif optn == "3":
note = None
scale = None
elif optn == "4":
break
else:
print("Invalid option. Try again.\n")
print("Bye!")
|
[
"majorScale.MajorScale",
"minorScale.MinorScale"
] |
[((603, 619), 'majorScale.MajorScale', 'MajorScale', (['note'], {}), '(note)\n', (613, 619), False, 'from majorScale import MajorScale\n'), ((1182, 1202), 'minorScale.MinorScale', 'MinorScale', (['scale', '(1)'], {}), '(scale, 1)\n', (1192, 1202), False, 'from minorScale import MinorScale\n'), ((1792, 1812), 'majorScale.MajorScale', 'MajorScale', (['scale', '(1)'], {}), '(scale, 1)\n', (1802, 1812), False, 'from majorScale import MajorScale\n'), ((666, 682), 'minorScale.MinorScale', 'MinorScale', (['note'], {}), '(note)\n', (676, 682), False, 'from minorScale import MinorScale\n'), ((1248, 1268), 'minorScale.MinorScale', 'MinorScale', (['scale', '(2)'], {}), '(scale, 2)\n', (1258, 1268), False, 'from minorScale import MinorScale\n'), ((1858, 1878), 'majorScale.MajorScale', 'MajorScale', (['scale', '(2)'], {}), '(scale, 2)\n', (1868, 1878), False, 'from majorScale import MajorScale\n')]
|
#this could be in a repo on its own should have used a
#obj oriented approach
"""manages GTK3 broadwayd displays
.. and to minimize bash scripting ugggh
usage:
>displynum, port =display.add()
>display.app('gedit',displaynum) #where gedit is a gtk3 app
you may want to set the limits after import
>import display
>display.DisplayLimit=10
"""
import signal
import os
import atexit
import subprocess
from collections import defaultdict
from time import sleep
import socket
import psutil # optionally used
port2display={}
display2port={}
class LimitError(Exception): val=None; pass
class DisplayLimit(LimitError):
"""a limit to the number of displays"""
val=10;
pass
class ApplicationLimit(LimitError):
"""a limit to the number of applications per display"""
val=10
pass
#should program onappstart onappclose
#todo capture stdio on procs
def get_openport():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
return s.getsockname()[1]
class sequenceg(): #should have used a generator but cool to...
#..hack classes
dc=0
@staticmethod
def getdisplay(self):
self.dc+=1 ;
return self.dc
@staticmethod
def __call__(self):
return self.getdisplay(self)
sequence=lambda p: sequenceg.__call__(sequenceg)
def friendly_display(port,begin=8000):
"""for wehn you want some 'web' ports"""
ret= port-begin
if ret < 0 or port<0:
raise ValueError('neg values')
return ret
def display_is_port(port):
display=port
return display
#functions need to be one to one mappings bw out and in
#port2display_function
p2df=sequence
port2display_function=p2df #don't use the port2display_func ...
#... in the code
#display_is_port#friendly_display#
# class keydefaultdict(defaultdict):
# def __missing__(self, key):
# if self.default_factory is None:
# raise KeyError( key )
# else:
# ret = self[key] = self.default_factory(key)
# return ret
class displaydict(defaultdict):
#adding issues are covvered by add()
def removemapping(self,display):
port2display.pop(display2port.pop(display))
def __delitem__(self, display):
super(displaydict, self).__delitem__(display)
self.removemapping(display)
def pop(self, display):
super(displaydict, self).pop(display)
self.removemapping(display)
#procs assoc with each display
running_displays=displaydict(list)
#lesson learned:
#def add(port,block=True) not a good idea to specify a port
def add(portgetter=get_openport
,block=True):#don't see a reason to not block
remove_zombie_apps(); kill_zombie_displays()
if len(running_displays)==DisplayLimit.val:
raise DisplayLimit(DisplayLimit.val)
port=portgetter() #not safe. need to reserve port
"""runs the html5 part of the app returning the display number
    blocks until the display server is up by default"""
display=p2df(port)
if display in running_displays:
raise KeyError('display server already running')
else:
if isport_openable(port) is True:
raise ValueError("can't get port "+str(port))
try:
p=subprocess.Popen(['./start_display.sh'
,str(display),str(port)]
#,preexec_fn=os.setsid
)
except: #todo: problem: broadwayd does not exit if it
#cant get the port. it gives back:
#"Can't listen: Error binding to address: Address already in use"
#dont' p.wait
raise Exception("couldn't start display")
#block until 'app' is ready on the port
if block==True:#todo if port given not openable
tries=0
while ( (isport_openable(port) is not True) ):
tries+=1 ; #sometimes it gets stuck here if
#rapid requests
if tries>10: return add(portgetter,block) #not nice
sleep(.1); continue
#registrations
running_displays[display].append(p) #the only reason it's a...
#...default dict.. do i really need defaultdict?
port2display[port]=display;
display2port[display]=port
# port->display should be 1 to 1 mapping
if len(display2port) != len(port2display):
raise Exception('display and port numbers are not 1-to-1')
return display, port
#what happens when the app spawns a window or another proc?
#on multiple gedits only the first one is alive
def app(cmd,display,**kwargs):
"""runs a gtk3 prog on display. """
if (display) not in running_displays:
raise ValueError('display does not exist')
remove_zombie_apps()
if (len(running_displays[display])-1)==ApplicationLimit.val:
raise ApplicationLimit(ApplicationLimit.val)
#kwargs['preexec_fn']=os.setpgid
sp=subprocess.Popen(['./display.sh',cmd,str(display)]
,**kwargs)
running_displays[display].append(sp)
return sp
def isport_openable(port):
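    # despite the name, returns True when the port is already in use (bind fails) and False when it is free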
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1',port)) #if can bind then not busy
s.close()
return False
except: return True
# cr=s.connect_ex(('127.0.0.1', port))
# if cr==0: return True
# else: return cr
def stop(display,signal=signal.SIGKILL):#signal.SIGINT):
# when using this with the server.. can't rely on being nice
# so just kill it
"""stops display and everything running on it"""
if display not in running_displays:
raise KeyError('no display #'+str(display)+' to kill')
#os.killpg(p.pid, signal.SIGTERM)
proclist= running_displays[display]
for p in reversed(proclist):
p.send_signal(signal);
#p.kill()
p.wait()
running_displays.pop(display)
remove_zombie_apps()
def remove_zombie_apps():
#the not immediate
delthese=[]
for adisplay in running_displays:
for an,aproc in enumerate(running_displays[adisplay]):
if an==0:continue #skip the broadway proc
if aproc.poll() is None: continue# running
else: delthese.append( (adisplay,an) )
for adisplay,an in delthese:
#in case empty list
try: running_displays[adisplay].pop(an) #the process...
# ..will be removed by the garbage collector eventually
except: pass
def kill_zombie_displays(really=True):#seems to add robustness...
#stop it if it become a problem
if really is not True: return
for ap in psutil.process_iter():
try: cmdline = ap.cmdline[0]
except: continue
if cmdline == 'broadwayd':
# index 2 is the port
if int(ap.cmdline[2]) not in port2display: ap.kill()
def kill_all():
"""kills all display apps on the server forcefully
...that it knows about that is."""
for ad in running_displays.keys():
stop(ad,signal=signal.SIGKILL)
atexit.register(kill_all)
|
[
"atexit.register",
"psutil.process_iter",
"socket.socket",
"time.sleep"
] |
[((6878, 6903), 'atexit.register', 'atexit.register', (['kill_all'], {}), '(kill_all)\n', (6893, 6903), False, 'import atexit\n'), ((896, 945), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (909, 945), False, 'import socket\n'), ((4958, 5007), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4971, 5007), False, 'import socket\n'), ((6468, 6489), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (6487, 6489), False, 'import psutil\n'), ((3912, 3922), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (3917, 3922), False, 'from time import sleep\n')]
|
# using a pivot element and partitioning (quickselect), basically quicksort-style partitioning
import random
def swap(A, i, j):
A[i], A[j] = A[j], A[i]
def partition(A, lo, hi):
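    # partition A[lo..hi] around the pivot A[lo]; the pivot ends up at index j, which is returned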
pivot = A[lo]
i = lo + 1
j = hi
while True:
while A[i] < pivot:
i += 1
if i == hi:
break
while A[j] > pivot:
j -= 1
if j == lo:
break
if j <= i:
break
swap(A, i, j)
swap(A, lo, j)
print(A)
return j
def k_smallest(A, k):
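    # quickselect: shuffle, then partition until the pivot lands at index k-1 (the k-th smallest element)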
lo = 0
hi = len(A) - 1
k = k - 1
random.shuffle(A)
while hi > lo:
j = partition(A, lo, hi)
if j == k:
return A[k]
elif j > k:
hi = j - 1
else:
lo = j + 1
return A[k]
if __name__ == '__main__':
test_case = int(input())
for _ in range(test_case):
number_of_elements = int(input())
A = [int(x) for x in input().strip().split(' ')]
k = int(input())
print(k_smallest(A, k))
|
[
"random.shuffle"
] |
[((575, 592), 'random.shuffle', 'random.shuffle', (['A'], {}), '(A)\n', (589, 592), False, 'import random\n')]
|
import os
print(os.name)
print(os.uname())
print(os.environ)
print(os.environ.get('PATH'))
p = os.path.join('.', 'test_dir')
print(p)
os.mkdir(p)
os.rmdir(p)
|
[
"os.mkdir",
"os.uname",
"os.environ.get",
"os.rmdir",
"os.path.join"
] |
[((98, 127), 'os.path.join', 'os.path.join', (['"""."""', '"""test_dir"""'], {}), "('.', 'test_dir')\n", (110, 127), False, 'import os\n'), ((137, 148), 'os.mkdir', 'os.mkdir', (['p'], {}), '(p)\n', (145, 148), False, 'import os\n'), ((149, 160), 'os.rmdir', 'os.rmdir', (['p'], {}), '(p)\n', (157, 160), False, 'import os\n'), ((32, 42), 'os.uname', 'os.uname', ([], {}), '()\n', (40, 42), False, 'import os\n'), ((69, 91), 'os.environ.get', 'os.environ.get', (['"""PATH"""'], {}), "('PATH')\n", (83, 91), False, 'import os\n')]
|
import os, subprocess
def execute_shell_process(message, command):
print(message)
env_copy = os.environ.copy()
output = subprocess.run(command, env=env_copy, shell=True)
if output.returncode == 0:
print("Success!")
else:
print("Oops! Please try again.")
|
[
"subprocess.run",
"os.environ.copy"
] |
[((103, 120), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (118, 120), False, 'import os, subprocess\n'), ((134, 183), 'subprocess.run', 'subprocess.run', (['command'], {'env': 'env_copy', 'shell': '(True)'}), '(command, env=env_copy, shell=True)\n', (148, 183), False, 'import os, subprocess\n')]
|
from unittest import TestCase
from jsonconf import CommandLineParser
class ConfigTests(TestCase):
def setUp(self):
pass
def test_constructor(self):
parser = CommandLineParser()
self.assertTrue(parser is not None)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), [])
self.assertEqual(parser.getProgram(), None)
def test_emptyArgs(self):
args = []
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), [])
self.assertEqual(parser.getProgram(), None)
def test_singleArg(self):
args = ["/usr/bin/whatever"]
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), [])
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_extraArgs(self):
extraArgs = ["one", "two", "-d", "--ignore"]
args = ["/usr/bin/whatever"]
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_keyArgs(self):
kwargs = {
"one": '1',
"two": "2",
"-d": "hello",
"--ignore": '5',
}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), kwargs)
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_complexKey(self):
kwargs = {
"one.two.three": '1',
}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), kwargs)
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_both(self):
kwargs = {
"one": '1',
"two.three": '1',
}
extraArgs = ["--test", "-v"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), kwargs)
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_requiredTest(self):
kwargs = {}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.requireKey("verbose")
self.assertRaises(Exception, parser.parse, args)
def test_requiredTest2(self):
kwargs = {"--verbose": 1}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.requireKey("--verbose")
parser.parse(args)
def test_invalidConverter(self):
kwargs = {"--verbose": "hello"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
# Cannot parse string to int
parser.requireKey("--verbose", int)
self.assertRaises(Exception, parser.parse, args)
    def test_validConverter(self):
kwargs = {"--verbose": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.requireKey("--verbose", int)
parser.parse(args)
def test_renameKeywordArguments(self):
kwargs = {"--verbose": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"-v": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"verbose": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"verb": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"verbose": "1", "--verbose": "1", "-v": "1", "verb": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
def test_renameExtraArguments(self):
kwargs = {}
extraArgs = ["-v"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["--verbose"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["verbose"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["verb"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["-v", "--verbose", "verb", "verbose"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
def test_renameOtherArgs(self):
kwargs = {"test": "255"}
extraArgs = ["--verbose", "otherArg"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {"test": "255"})
self.assertEqual(parser.getExtraArguments(), ["verbose", "otherArg"])
|
[
"jsonconf.CommandLineParser"
] |
[((185, 204), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (202, 204), False, 'from jsonconf import CommandLineParser\n'), ((483, 502), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (500, 502), False, 'from jsonconf import CommandLineParser\n'), ((784, 803), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (801, 803), False, 'from jsonconf import CommandLineParser\n'), ((1186, 1205), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (1203, 1205), False, 'from jsonconf import CommandLineParser\n'), ((1775, 1794), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (1792, 1794), False, 'from jsonconf import CommandLineParser\n'), ((2301, 2320), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (2318, 2320), False, 'from jsonconf import CommandLineParser\n'), ((2855, 2874), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (2872, 2874), False, 'from jsonconf import CommandLineParser\n'), ((3336, 3355), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (3353, 3355), False, 'from jsonconf import CommandLineParser\n'), ((3704, 3723), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (3721, 3723), False, 'from jsonconf import CommandLineParser\n'), ((4053, 4072), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (4070, 4072), False, 'from jsonconf import CommandLineParser\n'), ((4471, 4490), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (4488, 4490), False, 'from jsonconf import CommandLineParser\n'), ((4827, 4846), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (4844, 4846), False, 'from jsonconf import CommandLineParser\n'), ((5278, 5297), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (5295, 5297), False, 'from jsonconf import CommandLineParser\n'), ((5734, 5753), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (5751, 5753), False, 'from jsonconf import CommandLineParser\n'), ((6187, 6206), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (6204, 6206), False, 'from jsonconf import CommandLineParser\n'), ((6685, 6704), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (6702, 6704), False, 'from jsonconf import CommandLineParser\n'), ((7171, 7190), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (7188, 7190), False, 'from jsonconf import CommandLineParser\n'), ((7619, 7638), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (7636, 7638), False, 'from jsonconf import CommandLineParser\n'), ((8065, 8084), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (8082, 8084), False, 'from jsonconf import CommandLineParser\n'), ((8508, 8527), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (8525, 8527), False, 'from jsonconf import CommandLineParser\n'), ((8981, 9000), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (8998, 9000), False, 'from jsonconf import CommandLineParser\n'), ((9510, 9529), 'jsonconf.CommandLineParser', 'CommandLineParser', ([], {}), '()\n', (9527, 9529), False, 'from jsonconf import CommandLineParser\n')]
|
import unittest
import pydictionaria.sfm2cldf as s
import clldutils.sfm as sfm
class SplitMarkersWithSeparators(unittest.TestCase):
def test_lump_everything_together_if_seperator_isnt_found(self):
sep = 'sep'
input_markers = [
('marker1', 'value1'),
('marker2', 'value2')]
expected = [
[('marker1', 'value1'), ('marker2', 'value2')]]
self.assertEqual(
list(s.group_by_separator(sep, input_markers)),
expected)
def test_split_groups_on_separator(self):
sep = 'sep'
input_markers = [
('marker1', 'value1'),
('sep', 'value'),
('marker2', 'value2')]
expected = [
[('marker1', 'value1')],
[('sep', 'value'), ('marker2', 'value2')]]
self.assertEqual(
list(s.group_by_separator(sep, input_markers)),
expected)
class SplitListByPredicate(unittest.TestCase):
def test_no_element_matches_pred(self):
def iseven(x):
return x % 2 == 0
elements = [1, 3, 5]
even, odd = s.split_by_pred(iseven, elements)
self.assertEqual(even, [])
self.assertEqual(odd, [1, 3, 5])
def test_all_elements_match_pred(self):
def iseven(x):
return x % 2 == 0
elements = [2, 4, 6]
even, odd = s.split_by_pred(iseven, elements)
self.assertEqual(even, [2, 4, 6])
self.assertEqual(odd, [])
def test_some_elements_match_pred(self):
def iseven(x):
return x % 2 == 0
elements = [1, 2, 3, 4]
even, odd = s.split_by_pred(iseven, elements)
self.assertEqual(even, [2, 4])
self.assertEqual(odd, [1, 3])
class GenerateSequentialIDs(unittest.TestCase):
def test_sequence_starts_with_one(self):
gen = s.IDGenerator()
first_id = gen.next_id()
self.assertEqual(first_id, '000001')
def test_sequence_counts_up(self):
gen = s.IDGenerator()
first_id = gen.next_id()
second_id = gen.next_id()
self.assertEqual(first_id, '000001')
self.assertEqual(second_id, '000002')
def test_adding_prefix(self):
gen = s.IDGenerator('PRE')
first_id = gen.next_id()
second_id = gen.next_id()
self.assertEqual(first_id, 'PRE000001')
self.assertEqual(second_id, 'PRE000002')
class LinkProcessing(unittest.TestCase):
def setUp(self):
id_index = {
'OLDID1': 'NEWID1',
'OLDID2': 'NEWID2',
'OLDID3': 'NEWID3'}
label_index = {
'NEWID1': 'label 1',
'NEWID2': 'label 2',
'NEWID3': 'label 3'}
link_markers = {'linkmarker1', 'linkmarker2'}
link_regex = r'\bOLDID\d+\b'
self.link_processor = s.LinkProcessor(
id_index, label_index, link_markers, link_regex)
def test_entries_without_links_dont_change(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'no link'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'no link'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_single_link_is_replaced(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_links_in_different_markers_are_replaced(self):
original_entry = sfm.Entry([
('linkmarker1', 'link: OLDID2'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'link: [label 2](NEWID2)'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_links_in_same_marker_are_replaced(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: OLDID1; link 2: OLDID2'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: [label 1](NEWID1); link 2: [label 2](NEWID2)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_same_link_twice_in_the_same_marker(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: OLDID1; link 2: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: [label 1](NEWID1); link 2: [label 1](NEWID1)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_only_process_links_in_specified_markers(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'link: OLDID2')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'link: OLDID2')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_ignore_regex_matches_that_are_not_in_the_index(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1000'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1000'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_dont_mutate_original_entry(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
_ = self.link_processor(original_entry)
self.assertEqual(original_entry, expected)
def test_carry_over_attributes(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
original_entry.id = 'I have an ID, too!'
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'no link')])
expected.id = 'I have an ID, too!'
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
class MediaCaptionExtraction(unittest.TestCase):
def test_find_caption(self):
entry = sfm.Entry([
('marker1', 'val1'),
('pc', 'image-name'),
('cap', 'caption'),
('marker2', 'val2')])
caption_finder = s.CaptionFinder(['pc'], 'cap')
_ = caption_finder(entry)
expected = {'image-name': 'caption'}
self.assertEqual(caption_finder.captions, expected)
def test_find_multiple_captions(self):
entry = sfm.Entry([
('marker1', 'val1'),
('pc', 'image1-name'),
('cap', 'caption1'),
('marker2', 'val2'),
('pc', 'image2-name'),
('cap', 'caption2'),
('marker3', 'val3')])
caption_finder = s.CaptionFinder(['pc'], 'cap')
_ = caption_finder(entry)
expected = {
'image1-name': 'caption1',
'image2-name': 'caption2'}
self.assertEqual(caption_finder.captions, expected)
def test_captions_need_to_be_adjacent(self):
entry = sfm.Entry([
('marker1', 'val1'),
('pc', 'image-name'),
('marker2', 'val2'),
('cap', 'caption'),
('marker3', 'val3')])
caption_finder = s.CaptionFinder(['pc'], 'cap')
_ = caption_finder(entry)
expected = {}
self.assertEqual(caption_finder.captions, expected)
class MapSfmToCldf(unittest.TestCase):
def setUp(self):
self.mapping = {'marker1': 'Column1', 'marker2': 'Column2'}
def test_map_id(self):
sfm_entry = sfm.Entry()
sfm_entry.id = 'id1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(cldf_row, {'ID': 'id1'})
def test_map_columns(self):
sfm_entry = sfm.Entry([('marker1', 'value1'), ('marker2', 'value2')])
sfm_entry.id = 'id1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Column2': 'value2'})
def test_ignore_unexpected_sfm_markers(self):
sfm_entry = sfm.Entry([('marker1', 'value1'), ('unknown', 'value2')])
sfm_entry.id = 'id1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1'})
def test_map_entry_id(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.entry_id = 'entry1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Entry_ID': 'entry1'})
def test_map_sense_ids(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.sense_ids = ['sense1', 'sense2']
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Sense_IDs': ['sense1', 'sense2']})
def test_map_language_id(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.sense_ids = ['sense1', 'sense2']
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry, 'lang1')
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Sense_IDs': ['sense1', 'sense2'], 'Language_ID': 'lang1'})
def test_map_media_ids(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.media_ids = ['file1', 'file2']
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Media_IDs': ['file1', 'file2']})
def test_gloss():
sfm_entry = sfm.Entry([('ge', 'abc\tdef')])
cldf_row = s.sfm_entry_to_cldf_row(None, {'ge': 'Gloss'}, {}, set(), sfm_entry)
assert cldf_row['Gloss'] == 'abc\tdef'
cldf_row = s.sfm_entry_to_cldf_row('ExampleTable', {'ge': 'Gloss'}, {}, set(), sfm_entry)
assert cldf_row['Gloss'] == ['abc', 'def']
def test_cf():
sfm_entry = sfm.Entry([('cf', 'val1'), ('cf', 'val2;val3')])
cldf_row = s.sfm_entry_to_cldf_row('EntryTable', {'cf': 'Entry_IDs'}, {}, {'Entry_IDs'}, sfm_entry)
assert cldf_row['Entry_IDs'] == ['val1', 'val2', 'val3']
def test_multimarkers():
sfm_entry = sfm.Entry([('cf', 'val1'), ('cf', 'val2')])
cldf_row = s.sfm_entry_to_cldf_row(None, {'cf': 'See_Also'}, {}, set(), sfm_entry)
assert cldf_row['See_Also'] == 'val1 ; val2'
|
[
"pydictionaria.sfm2cldf.LinkProcessor",
"pydictionaria.sfm2cldf.sfm_entry_to_cldf_row",
"pydictionaria.sfm2cldf.split_by_pred",
"clldutils.sfm.Entry",
"pydictionaria.sfm2cldf.IDGenerator",
"pydictionaria.sfm2cldf.group_by_separator",
"pydictionaria.sfm2cldf.CaptionFinder"
] |
[((11518, 11549), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('ge', 'abc\\tdef')]"], {}), "([('ge', 'abc\\tdef')])\n", (11527, 11549), True, 'import clldutils.sfm as sfm\n'), ((11851, 11899), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('cf', 'val1'), ('cf', 'val2;val3')]"], {}), "([('cf', 'val1'), ('cf', 'val2;val3')])\n", (11860, 11899), True, 'import clldutils.sfm as sfm\n'), ((11915, 12008), 'pydictionaria.sfm2cldf.sfm_entry_to_cldf_row', 's.sfm_entry_to_cldf_row', (['"""EntryTable"""', "{'cf': 'Entry_IDs'}", '{}', "{'Entry_IDs'}", 'sfm_entry'], {}), "('EntryTable', {'cf': 'Entry_IDs'}, {}, {'Entry_IDs'\n }, sfm_entry)\n", (11938, 12008), True, 'import pydictionaria.sfm2cldf as s\n'), ((12108, 12151), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('cf', 'val1'), ('cf', 'val2')]"], {}), "([('cf', 'val1'), ('cf', 'val2')])\n", (12117, 12151), True, 'import clldutils.sfm as sfm\n'), ((1119, 1152), 'pydictionaria.sfm2cldf.split_by_pred', 's.split_by_pred', (['iseven', 'elements'], {}), '(iseven, elements)\n', (1134, 1152), True, 'import pydictionaria.sfm2cldf as s\n'), ((1376, 1409), 'pydictionaria.sfm2cldf.split_by_pred', 's.split_by_pred', (['iseven', 'elements'], {}), '(iseven, elements)\n', (1391, 1409), True, 'import pydictionaria.sfm2cldf as s\n'), ((1637, 1670), 'pydictionaria.sfm2cldf.split_by_pred', 's.split_by_pred', (['iseven', 'elements'], {}), '(iseven, elements)\n', (1652, 1670), True, 'import pydictionaria.sfm2cldf as s\n'), ((1858, 1873), 'pydictionaria.sfm2cldf.IDGenerator', 's.IDGenerator', ([], {}), '()\n', (1871, 1873), True, 'import pydictionaria.sfm2cldf as s\n'), ((2006, 2021), 'pydictionaria.sfm2cldf.IDGenerator', 's.IDGenerator', ([], {}), '()\n', (2019, 2021), True, 'import pydictionaria.sfm2cldf as s\n'), ((2229, 2249), 'pydictionaria.sfm2cldf.IDGenerator', 's.IDGenerator', (['"""PRE"""'], {}), "('PRE')\n", (2242, 2249), True, 'import pydictionaria.sfm2cldf as s\n'), ((2840, 2904), 'pydictionaria.sfm2cldf.LinkProcessor', 's.LinkProcessor', (['id_index', 'label_index', 'link_markers', 'link_regex'], {}), '(id_index, label_index, link_markers, link_regex)\n', (2855, 2904), True, 'import pydictionaria.sfm2cldf as s\n'), ((2998, 3098), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'no link'), ('othermarker',\n 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'no link'), (\n 'othermarker', 'no link')])\n", (3007, 3098), True, 'import clldutils.sfm as sfm\n'), ((3150, 3250), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'no link'), ('othermarker',\n 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'no link'), (\n 'othermarker', 'no link')])\n", (3159, 3250), True, 'import clldutils.sfm as sfm\n'), ((3455, 3560), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')])\n", (3464, 3560), True, 'import clldutils.sfm as sfm\n'), ((3612, 3727), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: [label 1](NEWID1)'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link: [label 1](NEWID1)'), ('othermarker', 'no link')])\n", (3621, 3727), True, 'import clldutils.sfm as sfm\n'), ((3949, 4058), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'link: OLDID2'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no 
link')]"], {}), "([('linkmarker1', 'link: OLDID2'), ('linkmarker2', 'link: OLDID1'),\n ('othermarker', 'no link')])\n", (3958, 4058), True, 'import clldutils.sfm as sfm\n'), ((4111, 4242), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'link: [label 2](NEWID2)'), ('linkmarker2',\n 'link: [label 1](NEWID1)'), ('othermarker', 'no link')]"], {}), "([('linkmarker1', 'link: [label 2](NEWID2)'), ('linkmarker2',\n 'link: [label 1](NEWID1)'), ('othermarker', 'no link')])\n", (4120, 4242), True, 'import clldutils.sfm as sfm\n'), ((4458, 4580), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: OLDID1; link 2: OLDID2'), ('othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: OLDID1; link 2: OLDID2'), ('othermarker', 'no link')])\n", (4467, 4580), True, 'import clldutils.sfm as sfm\n'), ((4633, 4781), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: [label 1](NEWID1); link 2: [label 2](NEWID2)'), ('othermarker',\n 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: [label 1](NEWID1); link 2: [label 2](NEWID2)'), ('othermarker',\n 'no link')])\n", (4642, 4781), True, 'import clldutils.sfm as sfm\n'), ((4994, 5116), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: OLDID1; link 2: OLDID1'), ('othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: OLDID1; link 2: OLDID1'), ('othermarker', 'no link')])\n", (5003, 5116), True, 'import clldutils.sfm as sfm\n'), ((5169, 5317), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: [label 1](NEWID1); link 2: [label 1](NEWID1)'), ('othermarker',\n 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link 1: [label 1](NEWID1); link 2: [label 1](NEWID1)'), ('othermarker',\n 'no link')])\n", (5178, 5317), True, 'import clldutils.sfm as sfm\n'), ((5535, 5645), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'link: OLDID2')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'link: OLDID2')])\n", (5544, 5645), True, 'import clldutils.sfm as sfm\n'), ((5697, 5817), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: [label 1](NEWID1)'), (\n 'othermarker', 'link: OLDID2')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link: [label 1](NEWID1)'), ('othermarker', 'link: OLDID2')])\n", (5706, 5817), True, 'import clldutils.sfm as sfm\n'), ((6046, 6153), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1000'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1000'),\n ('othermarker', 'no link')])\n", (6055, 6153), True, 'import clldutils.sfm as sfm\n'), ((6206, 6313), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1000'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1000'),\n ('othermarker', 'no link')])\n", (6215, 6313), True, 'import clldutils.sfm as sfm\n'), ((6522, 6627), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')])\n", (6531, 
6627), True, 'import clldutils.sfm as sfm\n'), ((6679, 6784), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')])\n", (6688, 6784), True, 'import clldutils.sfm as sfm\n'), ((6984, 7089), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2', 'link: OLDID1'), (\n 'othermarker', 'no link')])\n", (6993, 7089), True, 'import clldutils.sfm as sfm\n'), ((7190, 7305), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('linkmarker1', 'no link'), ('linkmarker2', 'link: [label 1](NEWID1)'), (\n 'othermarker', 'no link')]"], {}), "([('linkmarker1', 'no link'), ('linkmarker2',\n 'link: [label 1](NEWID1)'), ('othermarker', 'no link')])\n", (7199, 7305), True, 'import clldutils.sfm as sfm\n'), ((7585, 7685), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'val1'), ('pc', 'image-name'), ('cap', 'caption'), ('marker2',\n 'val2')]"], {}), "([('marker1', 'val1'), ('pc', 'image-name'), ('cap', 'caption'), (\n 'marker2', 'val2')])\n", (7594, 7685), True, 'import clldutils.sfm as sfm\n'), ((7755, 7785), 'pydictionaria.sfm2cldf.CaptionFinder', 's.CaptionFinder', (["['pc']", '"""cap"""'], {}), "(['pc'], 'cap')\n", (7770, 7785), True, 'import pydictionaria.sfm2cldf as s\n'), ((7985, 8156), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'val1'), ('pc', 'image1-name'), ('cap', 'caption1'), (\n 'marker2', 'val2'), ('pc', 'image2-name'), ('cap', 'caption2'), (\n 'marker3', 'val3')]"], {}), "([('marker1', 'val1'), ('pc', 'image1-name'), ('cap', 'caption1'),\n ('marker2', 'val2'), ('pc', 'image2-name'), ('cap', 'caption2'), (\n 'marker3', 'val3')])\n", (7994, 8156), True, 'import clldutils.sfm as sfm\n'), ((8258, 8288), 'pydictionaria.sfm2cldf.CaptionFinder', 's.CaptionFinder', (["['pc']", '"""cap"""'], {}), "(['pc'], 'cap')\n", (8273, 8288), True, 'import pydictionaria.sfm2cldf as s\n'), ((8548, 8668), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'val1'), ('pc', 'image-name'), ('marker2', 'val2'), ('cap',\n 'caption'), ('marker3', 'val3')]"], {}), "([('marker1', 'val1'), ('pc', 'image-name'), ('marker2', 'val2'),\n ('cap', 'caption'), ('marker3', 'val3')])\n", (8557, 8668), True, 'import clldutils.sfm as sfm\n'), ((8751, 8781), 'pydictionaria.sfm2cldf.CaptionFinder', 's.CaptionFinder', (["['pc']", '"""cap"""'], {}), "(['pc'], 'cap')\n", (8766, 8781), True, 'import pydictionaria.sfm2cldf as s\n'), ((9077, 9088), 'clldutils.sfm.Entry', 'sfm.Entry', ([], {}), '()\n', (9086, 9088), True, 'import clldutils.sfm as sfm\n'), ((9306, 9363), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'value1'), ('marker2', 'value2')]"], {}), "([('marker1', 'value1'), ('marker2', 'value2')])\n", (9315, 9363), True, 'import clldutils.sfm as sfm\n'), ((9666, 9723), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'value1'), ('unknown', 'value2')]"], {}), "([('marker1', 'value1'), ('unknown', 'value2')])\n", (9675, 9723), True, 'import clldutils.sfm as sfm\n'), ((9988, 10022), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'value1')]"], {}), "([('marker1', 'value1')])\n", (9997, 10022), True, 'import clldutils.sfm as sfm\n'), ((10348, 10382), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'value1')]"], {}), "([('marker1', 'value1')])\n", (10357, 10382), True, 'import clldutils.sfm as sfm\n'), ((10736, 10770), 
'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'value1')]"], {}), "([('marker1', 'value1')])\n", (10745, 10770), True, 'import clldutils.sfm as sfm\n'), ((11155, 11189), 'clldutils.sfm.Entry', 'sfm.Entry', (["[('marker1', 'value1')]"], {}), "([('marker1', 'value1')])\n", (11164, 11189), True, 'import clldutils.sfm as sfm\n'), ((444, 484), 'pydictionaria.sfm2cldf.group_by_separator', 's.group_by_separator', (['sep', 'input_markers'], {}), '(sep, input_markers)\n', (464, 484), True, 'import pydictionaria.sfm2cldf as s\n'), ((858, 898), 'pydictionaria.sfm2cldf.group_by_separator', 's.group_by_separator', (['sep', 'input_markers'], {}), '(sep, input_markers)\n', (878, 898), True, 'import pydictionaria.sfm2cldf as s\n')]
|
##-------------------------------------------
## 2 VARIABLE NORMAL DISTRIBUTION
##-------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
#USER INPUTS
FUNC=2
FS=18 #FONT SIZE
CMAP='hsv' #'RdYlBu'
#normal distribution param
ux=0.5; uy=0.0
sx=2.0; sy=1.0 #STD-DEV
rho=0.5; #[0,1) RHO=PEARSON CORRELATION
u=np.array([[ux],[uy]]) #MEAN VECTOR u=[ux,uy]
s=np.array([[sx**2.0,rho*sy*sx],[rho*sy*sx,sy**2.0]]) #COVARIANCE MATRIX
#GENERATE POINTS SAMPLED FROM DISTRIBUTION
xp, yp = np.random.multivariate_normal(u.reshape(2), s, 1000).T
# DEFINE FUNCTION
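# N(x, y): bivariate normal density with means (ux, uy), std devs (sx, sy) and correlation rho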
def N(x, y):
out=1.0/(2*3.1415*sx*sy*(1-rho**2.0)**0.5)
out=out*np.exp(-(((x-ux)/sx)**2.0-2*rho*((x-ux)/sx)*((y-uy)/sy)+((y-uy)/sy)**2.0)/(2*(1-rho**2)))
return out
#MESH-1 (SMALLER)
L=3*max(sx,sy)
xmin=-L; xmax=L; ymin=-L; ymax=L
x,y = np.meshgrid(np.linspace(xmin,xmax,20),np.linspace(ymin,ymax,20))
#MESH-2 (DENSER)
X, Y = np.meshgrid(np.linspace(xmin, xmax, 40), np.linspace(ymin, ymax, 40))
#SURFACE PLOT
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
ax.set_xlabel('x', fontsize=FS); ax.set_ylabel('y', fontsize=FS); ax.set_zlabel('p(x,y)', fontsize=FS)
surf=ax.plot_surface(X, Y, N(X, Y), cmap=CMAP)
ax.scatter(xp, yp, 1.1*np.max(N(X, Y)), marker='.')
plt.show();
#SCATTER PLOT
plt.plot(xp, yp,'.')
#CONTOUR PLOT
# plt.axis('equal')
plt.contour(X, Y, N(X, Y), 20, cmap=CMAP);
plt.show();
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((364, 386), 'numpy.array', 'np.array', (['[[ux], [uy]]'], {}), '([[ux], [uy]])\n', (372, 386), True, 'import numpy as np\n'), ((417, 483), 'numpy.array', 'np.array', (['[[sx ** 2.0, rho * sy * sx], [rho * sy * sx, sy ** 2.0]]'], {}), '([[sx ** 2.0, rho * sy * sx], [rho * sy * sx, sy ** 2.0]])\n', (425, 483), True, 'import numpy as np\n'), ((1043, 1088), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (1055, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1294, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1335), 'matplotlib.pyplot.plot', 'plt.plot', (['xp', 'yp', '"""."""'], {}), "(xp, yp, '.')\n", (1322, 1335), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1423, 1425), True, 'import matplotlib.pyplot as plt\n'), ((869, 896), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(20)'], {}), '(xmin, xmax, 20)\n', (880, 896), True, 'import numpy as np\n'), ((895, 922), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(20)'], {}), '(ymin, ymax, 20)\n', (906, 922), True, 'import numpy as np\n'), ((959, 986), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(40)'], {}), '(xmin, xmax, 40)\n', (970, 986), True, 'import numpy as np\n'), ((988, 1015), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(40)'], {}), '(ymin, ymax, 40)\n', (999, 1015), True, 'import numpy as np\n'), ((682, 813), 'numpy.exp', 'np.exp', (['(-(((x - ux) / sx) ** 2.0 - 2 * rho * ((x - ux) / sx) * ((y - uy) / sy) + (\n (y - uy) / sy) ** 2.0) / (2 * (1 - rho ** 2)))'], {}), '(-(((x - ux) / sx) ** 2.0 - 2 * rho * ((x - ux) / sx) * ((y - uy) /\n sy) + ((y - uy) / sy) ** 2.0) / (2 * (1 - rho ** 2)))\n', (688, 813), True, 'import numpy as np\n')]
|
"""Unit tests for reviewboard.extensions.hooks.FileDiffACLHook."""
import kgb
from djblets.features.testing import override_feature_check
from reviewboard.extensions.hooks import FileDiffACLHook
from reviewboard.extensions.tests.testcases import BaseExtensionHookTestCase
from reviewboard.reviews.features import DiffACLsFeature
class FileDiffACLHookTests(kgb.SpyAgency, BaseExtensionHookTestCase):
"""Tests for the FileDiffACLHook."""
fixtures = ['test_scmtools', 'test_users']
def setUp(self):
super(FileDiffACLHookTests, self).setUp()
self.user = self.create_user()
self.review_request = self.create_review_request(
create_repository=True)
self.review_request.target_people.add(self.review_request.submitter)
self.create_diffset(review_request=self.review_request, draft=True)
self.review_request.publish(user=self.review_request.submitter)
def test_single_aclhook_true(self):
"""Testing FileDiffACLHook basic approval with True result"""
self._test_hook_approval_sequence([True], True)
def test_single_aclhook_none(self):
"""Testing FileDiffACLHook basic approval with None result"""
self._test_hook_approval_sequence([None], True)
def test_single_aclhook_false(self):
"""Testing FileDiffACLHook basic approval with False result"""
self._test_hook_approval_sequence([False], False)
def test_multiple_aclhooks_1(self):
"""Testing FileDiffACLHook multiple with True and False"""
self._test_hook_approval_sequence([True, False], False)
def test_multiple_aclhooks_2(self):
"""Testing FileDiffACLHook multiple with True and None"""
self._test_hook_approval_sequence([True, None], True)
def test_multiple_aclhooks_3(self):
"""Testing FileDiffACLHook multiple with False and None"""
self._test_hook_approval_sequence([False, None], False)
def _test_hook_approval_sequence(self, accessible_values, result):
"""Test a sequence of FileDiffACLHook approval results.
Args:
accessible_values (list of bool):
A list of the values to return from FileDiffACLHook
implementations.
result (bool):
A resulting approval value to check.
"""
with override_feature_check(DiffACLsFeature.feature_id,
enabled=True):
for value in accessible_values:
hook = FileDiffACLHook(extension=self.extension)
self.spy_on(hook.is_accessible, op=kgb.SpyOpReturn(value))
self.assertEqual(self.review_request.is_accessible_by(self.user),
result)
|
[
"reviewboard.extensions.hooks.FileDiffACLHook",
"kgb.SpyOpReturn",
"djblets.features.testing.override_feature_check"
] |
[((2345, 2409), 'djblets.features.testing.override_feature_check', 'override_feature_check', (['DiffACLsFeature.feature_id'], {'enabled': '(True)'}), '(DiffACLsFeature.feature_id, enabled=True)\n', (2367, 2409), False, 'from djblets.features.testing import override_feature_check\n'), ((2514, 2555), 'reviewboard.extensions.hooks.FileDiffACLHook', 'FileDiffACLHook', ([], {'extension': 'self.extension'}), '(extension=self.extension)\n', (2529, 2555), False, 'from reviewboard.extensions.hooks import FileDiffACLHook\n'), ((2607, 2629), 'kgb.SpyOpReturn', 'kgb.SpyOpReturn', (['value'], {}), '(value)\n', (2622, 2629), False, 'import kgb\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from torchsummaryX import summary
from torch.nn.utils import weight_norm, remove_weight_norm
from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up,walk_ratent_space
from typing import Tuple
from torchsummaryX import summary
import numpy as np
import random
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
LRELU_SLOPE = 0.1
class ResBlock(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
assert len(dilation) == 3
self.convs1 = nn.ModuleList([
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
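        # residual connections: x <- x + c2(lrelu(c1(lrelu(x)))) for each dilated/plain conv pair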
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class Encoder(nn.Module):
def __init__(self, h):
super().__init__()
self.h = h
rks = h.resblock_kernel_sizes
rds = h.resblock_dilation_sizes
drs = h.downsample_rates
drks = h.downsample_kernel_sizes
dci = h.downsample_initial_channel
self.num_kernels = len(rks)
self.num_downsamples = len(drs)
self.conv_pre = weight_norm(nn.Conv1d(1, dci, 7,1,3))
        # get expected input lengths and output lengths
init_len = h.n_fft
self.L_ins = [init_len]
self.L_outs = []
for r in drs:
lo = int(init_len/r)
self.L_outs.append(lo)
self.L_ins.append(lo)
init_len = lo
self.L_outs.append(1)
# get downsampling paddings
self.pads = []
for i,r in enumerate(drs):
pad = get_padding_down(self.L_ins[i],self.L_outs[i],drks[i],r)
self.pads.append(pad)
# get downsampling channels
self.channels = []
for i in range(len(drs)+1):
self.channels.append(dci*(2**i))
self.dns = nn.ModuleList()
for i, (u, k) in enumerate(zip(drs, drks)):
self.dns.append(weight_norm(
nn.Conv1d(self.channels[i], self.channels[i+1],k,u,self.pads[i])
))
self.resblocks = nn.ModuleList()
for i in range(len(self.dns)):
ch = self.channels[i+1]
for j,(k,d) in enumerate(zip(rks,rds)):
self.resblocks.append(ResBlock(ch,k,d))
self.conv_post = weight_norm(nn.Conv1d(self.channels[-1],h.ratent_dim,self.L_ins[-1]))
self.conv_post_var = weight_norm(nn.Conv1d(self.channels[-1],h.ratent_dim,self.L_ins[-1]))
self.dns.apply(init_weights)
self.conv_post.apply(init_weights)
self.conv_post_var.apply(init_weights)
def forward(self, x:torch.Tensor) -> torch.Tensor:
x = self.conv_pre(x)
for i in range(self.num_downsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.dns[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
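        # two conv heads: posterior mean and a softplus-positive variance over the latent dims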
mean = self.conv_post(x)
var = F.softplus(self.conv_post_var(x)) + 1e-8
return mean,var
def dual_flow(self, x1:torch.Tensor, x2:torch.Tensor,with_random:bool=True) -> torch.Tensor:
mean1,var1 = self.forward(x1)
mean2,var2 = self.forward(x2)
if with_random:
out1 = self.random_sample(mean1,var1)
out2 = self.random_sample(mean2,var2)
else:
out1,out2 = mean1,mean2
out = torch.cat([out1, out2], dim=1) #.tanh() # notanh
return out
@staticmethod
def random_sample(mean:torch.Tensor, var:torch.Tensor):
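        # reparameterization trick: z = mean + eps * sqrt(var), with eps ~ N(0, I)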
return mean + torch.randn_like(mean)*torch.sqrt(var)
def summary(self):
dummy = torch.randn(1,1,self.h.n_fft)
summary(self, dummy)
def remove_weight_norm(self):
print("Removing weight norm...")
for l in self.dns:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class Decoder(nn.Module):
def __init__(self, h) -> None:
super().__init__()
self.h = h
rks = h.resblock_kernel_sizes
rds = h.resblock_dilation_sizes
uik = h.upsample_initial_kernel
urs = h.upsample_rates
urks = h.upsample_kernel_sizes
uic = h.upsample_initial_channel
self.out_len = h.n_fft +h.hop_len
self.num_kernels = len(rks)
self.num_upsamples = len(urs)
self.conv_pre = weight_norm(nn.ConvTranspose1d(h.ratent_dim*2, uic,uik))
        # get expected input lengths and output lengths
init_len = uik
self.L_ins = [init_len]
self.L_outs = []
for r in urs:
lo = init_len * r
self.L_ins.append(lo)
self.L_outs.append(lo)
init_len = lo
# get upsampling paddings
self.pads = []
for i,r in enumerate(urs):
pad = get_padding_up(self.L_ins[i],self.L_outs[i],urks[i],r)
self.pads.append(pad)
# get upsampling channels
self.channels = [uic]
ch = uic
for i in range(len(urs)):
self.channels.append(int(ch/(2**i)))
self.ups = nn.ModuleList()
for i, (u,k) in enumerate(zip(urs,urks)):
self.ups.append(weight_norm(
nn.ConvTranspose1d(self.channels[i], self.channels[i+1],k,u,self.pads[i])
))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = self.channels[i+1]
for j,(k,d) in enumerate(zip(rks,rds)):
self.resblocks.append(ResBlock(ch,k,d))
self.conv_post = weight_norm(nn.Conv1d(self.channels[-1],1,7,1,3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x:torch.Tensor) -> torch.Tensor:
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
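        # center-crop the upsampled signal to the expected output length (n_fft + hop_len)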
l = x.size(-1)
start = int((l - self.out_len)/2)
x = x[:,:,start:start+self.out_len]
#x = x.tanh() # grad explosion ?
return x
def summary(self):
dummy = torch.randn(1,self.h.ratent_dim*2,1)
summary(self,dummy)
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class VoiceBand(pl.LightningModule):
def __init__(self, h,dtype:torch.dtype=torch.float,device:torch.device='cpu') -> None:
super().__init__()
self.h = h
self.reset_seed()
self.encoder = Encoder(h).type(dtype).to(self.device)
self.decoder = Decoder(h).type(dtype).to(self.device)
self.n_fft = h.n_fft
self.ratent_dim = h.ratent_dim
self.walking_steps = int(h.breath_len / h.hop_len) + 1
self.walking_resolution = h.walking_resolution
self.out_len = self.decoder.out_len
self.view_interval = 10
self.kl_lambda = h.kl_lambda
# training settings
self.MSE = nn.MSELoss()
self.MAE = nn.L1Loss()
self.actions = walk_ratent_space(self.ratent_dim, self.walking_steps,self.walking_resolution,device=device,dtype=dtype)
def forward(self, x1:torch.Tensor,x2:torch.Tensor) -> torch.Tensor:
"""
x1: (-1, 1, n_fft)
x2: (-1, 1, n_fft)
"""
mean1,var1 = self.encoder.forward(x1)
mean2,var2 = self.encoder.forward(x2)
mean,var = torch.cat([mean1,mean2],dim=1),torch.cat([var1,var2],dim=1)
out = self.encoder.random_sample(mean,var)#.tanh()# notanh
out = self.decoder(out)
return out,mean,var
def on_fit_start(self) -> None:
self.logger.log_hyperparams(self.h)
def training_step(self, batch:Tuple[torch.Tensor], batch_idx) -> torch.Tensor:
"""
batch : (-1, ch, n_fft+hop_len)
"""
sound, = batch
sound = sound.type(self.dtype)
if self.h.random_gain:
sound= self.random_gain(sound)
x1,x2,ans = sound[:,:,:self.h.n_fft], sound[:,:,-self.h.n_fft:], sound
out_,mean,var = self.forward(x1,x2)
        out = out_.tanh() # atanh grad explosion
mse = self.MSE(ans, out)
mae = self.MAE(ans,out)
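        # VAE objective: reconstruction term (BCE below) + KL(N(mean, var) || N(0, I)) weighted by kl_lambda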
KL = 0.5*torch.sum(
torch.pow(mean,2) +
var -
torch.log(var) -1
).sum() / out.size(0)
#marginal_likelihood = self.BCEwithLogits(torch.atanh(out),0.5*ans+1)
#print(True in torch.isnan(out))
marginal_likelihood= F.binary_cross_entropy_with_logits(out,0.5*ans+1,reduction="sum") / out.size(0)
loss = marginal_likelihood + KL * self.kl_lambda
#loss = self.kl_lambda * KL + mse
self.log("loss",loss)
self.log("mse",mse)
self.log("mae",mae)
self.log("KL div",KL)
self.log("Marginal likelihood",marginal_likelihood)
return loss
@torch.no_grad()
def on_epoch_end(self) -> None:
"""
walk through the ratent space and log audio wave.
"""
if self.current_epoch%self.view_interval !=0:
return
self.actions = walk_ratent_space(self.ratent_dim, self.walking_steps,self.walking_resolution,
device=self.device,dtype=self.dtype)
wave = None
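        # autoregressively extend the waveform by one hop per latent-space action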
for act in self.actions.unsqueeze(1):
wave= self.predict_one_step(act,wave)
wave = wave.squeeze(0).T.detach().cpu().numpy()
# tensorboard logging
tb:SummaryWriter = self.logger.experiment
tb.add_audio("Ratent space audio",wave, self.current_epoch,self.h.frame_rate)
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(wave)
tb.add_figure("Walked wave",fig, self.current_epoch)
return
def random_gain(self, sound:torch.Tensor) -> torch.Tensor:
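        # rescale each waveform to unit peak, then apply a random gain drawn from [0, 1)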
n,c,l = sound.shape
maxes= sound.view(n,c*l).abs().max(dim=1,keepdim=True).values.unsqueeze(-1)
maxes[maxes==0.0] = 1.0
gains = torch.rand_like(maxes)
sound = (sound/maxes) * gains
return sound
def configure_optimizers(self):
optim = torch.optim.AdamW(self.parameters(), self.h.lr,[self.h.adam_b1,self.h.adam_b2])
scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.h.lr_decay)
scheduler.last_epoch=self.trainer.max_epochs
return [optim],[scheduler]
silence = None
def set_silence(self):
self.silence = torch.zeros(1,self.h.sample_ch,self.n_fft,device=self.device,dtype=self.dtype)
def set_view_interval(self, interval:int=None):
if interval:
self.view_interval= interval
def predict_one_step(self, action:torch.Tensor,previous_wave:torch.Tensor=None) -> torch.Tensor:
"""
action : (-1, ratent_dim, 1)
previous_wave : (-1,ch, l)
"""
if previous_wave is None:
if self.silence is None:
self.set_silence()
previous_wave = self.silence
assert len(action.shape) == 3
assert len(previous_wave.shape) == 3
if previous_wave.size(-1) < self.n_fft :
pad_len = self.n_fft - previous_wave.size(-1)
n,c,l = previous_wave.shape
pad = torch.zeros(n,c,pad_len,dtype=previous_wave.dtype,device=previous_wave.device)
previous_wave = torch.cat([pad,previous_wave],dim=-1)
enc_in = previous_wave[:,:,-self.n_fft:].to(self.dtype).to(self.device)
encoded = self.encoder.forward(enc_in)[0]#.tanh()# notanh
dec_in = torch.cat([encoded,action],dim=1)
d_out = self.decoder.forward(dec_in)[:,:,self.n_fft:].type_as(previous_wave)
d_out = d_out.tanh() # grad explosion ?
wave = torch.cat([previous_wave,d_out],dim=-1)
return wave
def reset_seed(self):
seed = self.h.seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
def summary(self,tensorboard:bool = True):
dummy = torch.randn(1,1,self.n_fft)
summary(self, dummy,dummy)
if tensorboard:
writer = SummaryWriter()
writer.add_graph(self, [dummy,dummy])
def remove_weight_norm(self):
self.encoder.remove_weight_norm()
self.decoder.remove_weight_norm()
if __name__ == '__main__':
from utils import load_config
config = load_config("hparams/origin.json")
model = VoiceBand(config)
model.summary()
model.remove_weight_norm()
|
[
"numpy.random.seed",
"torch.sqrt",
"torch.cat",
"torch.randn",
"utils.get_padding",
"matplotlib.pyplot.figure",
"torch.rand_like",
"utils.load_config",
"torch.nn.functional.leaky_relu",
"torchsummaryX.summary",
"torch.no_grad",
"utils.walk_ratent_space",
"torch.nn.MSELoss",
"torch.nn.Conv1d",
"torch.nn.functional.binary_cross_entropy_with_logits",
"random.seed",
"torch.optim.lr_scheduler.ExponentialLR",
"utils.get_padding_up",
"torch.utils.tensorboard.SummaryWriter",
"torch.zeros",
"torch.log",
"torch.randn_like",
"torch.nn.ModuleList",
"utils.get_padding_down",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.pow",
"torch.nn.utils.remove_weight_norm",
"torch.nn.L1Loss",
"torch.nn.ConvTranspose1d"
] |
[((11251, 11266), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11264, 11266), False, 'import torch\n'), ((14800, 14834), 'utils.load_config', 'load_config', (['"""hparams/origin.json"""'], {}), "('hparams/origin.json')\n", (14811, 14834), False, 'from utils import load_config\n'), ((3380, 3395), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3393, 3395), True, 'import torch.nn as nn\n'), ((3611, 3626), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3624, 3626), True, 'import torch.nn as nn\n'), ((4649, 4664), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (4661, 4664), True, 'import torch.nn.functional as F\n'), ((5148, 5178), 'torch.cat', 'torch.cat', (['[out1, out2]'], {'dim': '(1)'}), '([out1, out2], dim=1)\n', (5157, 5178), False, 'import torch\n'), ((5396, 5427), 'torch.randn', 'torch.randn', (['(1)', '(1)', 'self.h.n_fft'], {}), '(1, 1, self.h.n_fft)\n', (5407, 5427), False, 'import torch\n'), ((5434, 5454), 'torchsummaryX.summary', 'summary', (['self', 'dummy'], {}), '(self, dummy)\n', (5441, 5454), False, 'from torchsummaryX import summary\n'), ((5668, 5701), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_pre'], {}), '(self.conv_pre)\n', (5686, 5701), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((5710, 5744), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_post'], {}), '(self.conv_post)\n', (5728, 5744), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((6967, 6982), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6980, 6982), True, 'import torch.nn as nn\n'), ((7205, 7220), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7218, 7220), True, 'import torch.nn as nn\n'), ((8066, 8081), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (8078, 8081), True, 'import torch.nn.functional as F\n'), ((8319, 8359), 'torch.randn', 'torch.randn', (['(1)', '(self.h.ratent_dim * 2)', '(1)'], {}), '(1, self.h.ratent_dim * 2, 1)\n', (8330, 8359), False, 'import torch\n'), ((8364, 8384), 'torchsummaryX.summary', 'summary', (['self', 'dummy'], {}), '(self, dummy)\n', (8371, 8384), False, 'from torchsummaryX import summary\n'), ((8597, 8630), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_pre'], {}), '(self.conv_pre)\n', (8615, 8630), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((8639, 8673), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_post'], {}), '(self.conv_post)\n', (8657, 8673), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((9345, 9357), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (9355, 9357), True, 'import torch.nn as nn\n'), ((9377, 9388), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (9386, 9388), True, 'import torch.nn as nn\n'), ((9413, 9525), 'utils.walk_ratent_space', 'walk_ratent_space', (['self.ratent_dim', 'self.walking_steps', 'self.walking_resolution'], {'device': 'device', 'dtype': 'dtype'}), '(self.ratent_dim, self.walking_steps, self.\n walking_resolution, device=device, dtype=dtype)\n', (9430, 9525), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((11482, 11604), 'utils.walk_ratent_space', 'walk_ratent_space', (['self.ratent_dim', 'self.walking_steps', 'self.walking_resolution'], {'device': 'self.device', 'dtype': 'self.dtype'}), '(self.ratent_dim, self.walking_steps, self.\n 
walking_resolution, device=self.device, dtype=self.dtype)\n', (11499, 11604), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((11991, 12003), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12001, 12003), True, 'import matplotlib.pyplot as plt\n'), ((12358, 12380), 'torch.rand_like', 'torch.rand_like', (['maxes'], {}), '(maxes)\n', (12373, 12380), False, 'import torch\n'), ((12593, 12661), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optim'], {'gamma': 'self.h.lr_decay'}), '(optim, gamma=self.h.lr_decay)\n', (12631, 12661), False, 'import torch\n'), ((12820, 12907), 'torch.zeros', 'torch.zeros', (['(1)', 'self.h.sample_ch', 'self.n_fft'], {'device': 'self.device', 'dtype': 'self.dtype'}), '(1, self.h.sample_ch, self.n_fft, device=self.device, dtype=self\n .dtype)\n', (12831, 12907), False, 'import torch\n'), ((13937, 13972), 'torch.cat', 'torch.cat', (['[encoded, action]'], {'dim': '(1)'}), '([encoded, action], dim=1)\n', (13946, 13972), False, 'import torch\n'), ((14119, 14160), 'torch.cat', 'torch.cat', (['[previous_wave, d_out]'], {'dim': '(-1)'}), '([previous_wave, d_out], dim=-1)\n', (14128, 14160), False, 'import torch\n'), ((14249, 14269), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14263, 14269), True, 'import numpy as np\n'), ((14278, 14301), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (14295, 14301), False, 'import torch\n'), ((14310, 14338), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (14332, 14338), False, 'import torch\n'), ((14347, 14364), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14358, 14364), False, 'import random\n'), ((14429, 14458), 'torch.randn', 'torch.randn', (['(1)', '(1)', 'self.n_fft'], {}), '(1, 1, self.n_fft)\n', (14440, 14458), False, 'import torch\n'), ((14465, 14492), 'torchsummaryX.summary', 'summary', (['self', 'dummy', 'dummy'], {}), '(self, dummy, dummy)\n', (14472, 14492), False, 'from torchsummaryX import summary\n'), ((1903, 1931), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', 'LRELU_SLOPE'], {}), '(x, LRELU_SLOPE)\n', (1915, 1931), True, 'import torch.nn.functional as F\n'), ((1973, 2002), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['xt', 'LRELU_SLOPE'], {}), '(xt, LRELU_SLOPE)\n', (1985, 2002), True, 'import torch.nn.functional as F\n'), ((2144, 2165), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (2162, 2165), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((2208, 2229), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (2226, 2229), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((2647, 2673), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', 'dci', '(7)', '(1)', '(3)'], {}), '(1, dci, 7, 1, 3)\n', (2656, 2673), True, 'import torch.nn as nn\n'), ((3116, 3175), 'utils.get_padding_down', 'get_padding_down', (['self.L_ins[i]', 'self.L_outs[i]', 'drks[i]', 'r'], {}), '(self.L_ins[i], self.L_outs[i], drks[i], r)\n', (3132, 3175), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((3856, 3914), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[-1]', 'h.ratent_dim', 'self.L_ins[-1]'], {}), '(self.channels[-1], h.ratent_dim, self.L_ins[-1])\n', (3865, 3914), True, 'import torch.nn as nn\n'), ((3955, 4013), 
'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[-1]', 'h.ratent_dim', 'self.L_ins[-1]'], {}), '(self.channels[-1], h.ratent_dim, self.L_ins[-1])\n', (3964, 4013), True, 'import torch.nn as nn\n'), ((4287, 4315), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', 'LRELU_SLOPE'], {}), '(x, LRELU_SLOPE)\n', (4299, 4315), True, 'import torch.nn.functional as F\n'), ((5570, 5591), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (5588, 5591), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((6235, 6281), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(h.ratent_dim * 2)', 'uic', 'uik'], {}), '(h.ratent_dim * 2, uic, uik)\n', (6253, 6281), True, 'import torch.nn as nn\n'), ((6685, 6742), 'utils.get_padding_up', 'get_padding_up', (['self.L_ins[i]', 'self.L_outs[i]', 'urks[i]', 'r'], {}), '(self.L_ins[i], self.L_outs[i], urks[i], r)\n', (6699, 6742), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((7441, 7481), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[-1]', '(1)', '(7)', '(1)', '(3)'], {}), '(self.channels[-1], 1, 7, 1, 3)\n', (7450, 7481), True, 'import torch.nn as nn\n'), ((7704, 7732), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', 'LRELU_SLOPE'], {}), '(x, LRELU_SLOPE)\n', (7716, 7732), True, 'import torch.nn.functional as F\n'), ((8499, 8520), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (8517, 8520), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((9790, 9822), 'torch.cat', 'torch.cat', (['[mean1, mean2]'], {'dim': '(1)'}), '([mean1, mean2], dim=1)\n', (9799, 9822), False, 'import torch\n'), ((9821, 9851), 'torch.cat', 'torch.cat', (['[var1, var2]'], {'dim': '(1)'}), '([var1, var2], dim=1)\n', (9830, 9851), False, 'import torch\n'), ((10869, 10940), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['out', '(0.5 * ans + 1)'], {'reduction': '"""sum"""'}), "(out, 0.5 * ans + 1, reduction='sum')\n", (10903, 10940), True, 'import torch.nn.functional as F\n'), ((13620, 13707), 'torch.zeros', 'torch.zeros', (['n', 'c', 'pad_len'], {'dtype': 'previous_wave.dtype', 'device': 'previous_wave.device'}), '(n, c, pad_len, dtype=previous_wave.dtype, device=previous_wave.\n device)\n', (13631, 13707), False, 'import torch\n'), ((13727, 13766), 'torch.cat', 'torch.cat', (['[pad, previous_wave]'], {'dim': '(-1)'}), '([pad, previous_wave], dim=-1)\n', (13736, 13766), False, 'import torch\n'), ((14537, 14552), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (14550, 14552), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5317, 5339), 'torch.randn_like', 'torch.randn_like', (['mean'], {}), '(mean)\n', (5333, 5339), False, 'import torch\n'), ((5340, 5355), 'torch.sqrt', 'torch.sqrt', (['var'], {}), '(var)\n', (5350, 5355), False, 'import torch\n'), ((3505, 3574), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[i]', 'self.channels[i + 1]', 'k', 'u', 'self.pads[i]'], {}), '(self.channels[i], self.channels[i + 1], k, u, self.pads[i])\n', (3514, 3574), True, 'import torch.nn as nn\n'), ((7090, 7168), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.channels[i]', 'self.channels[i + 1]', 'k', 'u', 'self.pads[i]'], {}), '(self.channels[i], self.channels[i + 1], k, u, self.pads[i])\n', (7108, 7168), True, 'import torch.nn as nn\n'), ((826, 863), 'utils.get_padding', 'get_padding', (['kernel_size', 
'dilation[0]'], {}), '(kernel_size, dilation[0])\n', (837, 863), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((998, 1035), 'utils.get_padding', 'get_padding', (['kernel_size', 'dilation[1]'], {}), '(kernel_size, dilation[1])\n', (1009, 1035), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1170, 1207), 'utils.get_padding', 'get_padding', (['kernel_size', 'dilation[2]'], {}), '(kernel_size, dilation[2])\n', (1181, 1207), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1421, 1448), 'utils.get_padding', 'get_padding', (['kernel_size', '(1)'], {}), '(kernel_size, 1)\n', (1432, 1448), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1573, 1600), 'utils.get_padding', 'get_padding', (['kernel_size', '(1)'], {}), '(kernel_size, 1)\n', (1584, 1600), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1725, 1752), 'utils.get_padding', 'get_padding', (['kernel_size', '(1)'], {}), '(kernel_size, 1)\n', (1736, 1752), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((10672, 10686), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (10681, 10686), False, 'import torch\n'), ((10622, 10640), 'torch.pow', 'torch.pow', (['mean', '(2)'], {}), '(mean, 2)\n', (10631, 10640), False, 'import torch\n')]
|
"""Perceptron implementation for apprenticeship learning in pacman.
Author: <NAME>, <NAME>, and <NAME>
Class: CSI-480-01
Assignment: PA 5 -- Supervised Learning
Due Date: Nov 30, 2018 11:59 PM
Certification of Authenticity:
I certify that this is entirely my own work, except where I have given
fully-documented references to the work of others. I understand the definition
and consequences of plagiarism and acknowledge that the assessor of this
assignment may, for the purpose of assessing this assignment:
- Reproduce this assignment and provide a copy to another member of academic
- staff; and/or Communicate a copy of this assignment to a plagiarism checking
- service (which may then retain a copy of this assignment on its database for
- the purpose of future plagiarism checking)
Champlain College CSI-480, Fall 2018
The following code was adapted by <NAME> (<EMAIL>)
from the UC Berkeley Pacman Projects (see license and attribution below).
----------------------
Licensing Information: You are free to use or extend these projects for
educational purposes provided that (1) you do not distribute or publish
solutions, (2) you retain this notice, and (3) you provide clear
attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
Attribution Information: The Pacman AI projects were developed at UC Berkeley.
The core projects and autograders were primarily created by <NAME>
(<EMAIL>) and <NAME> (<EMAIL>).
Student side autograding was added by <NAME>, <NAME>, and
<NAME> (<EMAIL>).
"""
import util
from perceptron import PerceptronClassifier
PRINT = True
class PerceptronClassifierPacman(PerceptronClassifier):
"""A PerceptronClassifier for apprenticeeship learning in pacman."""
def __init__(self, legal_labels, max_iterations):
"""Initialize the perceptron.
Args:
legal_labels: list of legal_labels
max_iterations: the max number of iterations to train for
"""
super().__init__(legal_labels, max_iterations)
self.weights = util.Counter()
def classify(self, data):
"""Classify the data points.
Data contains a list of (datum, legal moves)
Datum is a Counter representing the features of each GameState.
legal_moves is a list of legal moves for that GameState.
"""
guesses = []
for datum, legal_moves in data:
vectors = util.Counter()
for l in legal_moves:
vectors[l] = self.weights * datum[l]
guesses.append(vectors.arg_max())
return guesses
def train(self, training_data, training_labels, validation_data,
validation_labels):
"""Train the perceptron."""
# could be useful later
self.features = list(training_data[0][0]['Stop'].keys())
# DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR
# THE AUTOGRADER WILL LIKELY DEDUCT POINTS.
for iteration in range(self.max_iterations):
print("Starting iteration ", iteration, "...")
for (datum, legal_moves), label in zip(training_data,
training_labels):
# *** YOUR CODE HERE ***
# Gets the guess action, then updates the weights
guess = self.classify([(datum, legal_moves)])[0]
if guess != label:
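                    # perceptron update: add the true label's feature vector, subtract the guessed one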
self.weights += datum[label]
self.weights -= datum[guess]
|
[
"util.Counter"
] |
[((2037, 2051), 'util.Counter', 'util.Counter', ([], {}), '()\n', (2049, 2051), False, 'import util\n'), ((2407, 2421), 'util.Counter', 'util.Counter', ([], {}), '()\n', (2419, 2421), False, 'import util\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
Created on Tue Oct 6 16:23:04 2020
@author: Admin
"""
import numpy as np
import pandas as pd
import math
import os
from keras.layers import Dense
from keras.layers import LSTM
from keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
import matplotlib.pyplot as plt
#load data
filename = 'international-airline-passengers.csv'
filepath = os.path.join(os.getcwd(), filename)
dataframe = pd.read_csv(filepath,
usecols = [1],
engine = 'python')
dataset = dataframe.values
#convert dataframe to numpy array
dataset = dataset.astype('float32')
#the shape of dataset: num_samples, features
#normalise the dataset
feature_range = (0, 1)
scaler = MinMaxScaler(feature_range = feature_range)
dataset = scaler.fit_transform(dataset)
#split the dataset into training and test set
i_split = 0.8
train_size = int(len(dataset) * i_split)
#print(train_size)
test_size = len(dataset) - train_size
#print(test_size)
train_set = dataset[0:train_size, :]
test_set = dataset[train_size:, :]
#convert an array values into a dataset matrix for LSTM
def create_dataset(dataset, look_back):
dataX = []
dataY = []
for i in range(len(dataset) - look_back - 1):
a = dataset[i:(i+look_back), 0]
b = dataset[i+look_back, 0]
dataX.append(a)
dataY.append(b)
dataX = np.array(dataX)
dataY = np.array(dataY)
return dataX, dataY
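# Example (illustrative numbers, ignoring scaling): with look_back = 1 the series
# [112, 118, 132, 129] yields dataX = [[112], [118]] and dataY = [118, 132]:
# each sample is one previous time step and the label is the value that follows it.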
look_back = 1
#look_back = time_steps: the number of previous time steps
trainX, trainY = create_dataset(train_set, look_back)
testX, testY = create_dataset(test_set, look_back)
#reshape input to be [samples, time_steps, features]
time_steps = look_back
features = dataset.shape[1]
trainX = np.reshape(trainX, (trainX.shape[0], time_steps, features))
testX = np.reshape(testX, (testX.shape[0], time_steps, features))
#create and fit the LSTM
input_shape = (time_steps, features)
lstm_neurons = 4
#lstm_neurons is a hyper-parameter
dense_neurons = 1
#dense_neurons is equal to the shape of trainY (= 1)
batch_size = 1
epochs = 100
lr = 0.001
optimizer = Adam(lr = lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8, decay = 0.0, amsgrad = True)
model = Sequential()
model.add(LSTM(lstm_neurons, input_shape = input_shape, return_sequences = False))
model.add(Dense(dense_neurons, activation = 'linear'))
model.compile(loss = 'mean_squared_error', optimizer = optimizer)
model.fit(trainX,
trainY,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
shuffle = True)
#make predictions
trainPredict = model.predict(trainX, batch_size = batch_size)
testPredict = model.predict(testX, batch_size = batch_size)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
'''
the most important hyper-parameters are look_back and batch_size;
try several values for each to determine the best combination
'''
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"os.getcwd",
"keras.layers.LSTM",
"sklearn.preprocessing.MinMaxScaler",
"keras.optimizers.Adam",
"numpy.empty_like",
"keras.layers.Dense",
"numpy.array",
"numpy.reshape",
"keras.models.Sequential",
"sklearn.metrics.mean_squared_error"
] |
[((587, 638), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'usecols': '[1]', 'engine': '"""python"""'}), "(filepath, usecols=[1], engine='python')\n", (598, 638), True, 'import pandas as pd\n'), ((901, 942), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': 'feature_range'}), '(feature_range=feature_range)\n', (913, 942), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1958, 2017), 'numpy.reshape', 'np.reshape', (['trainX', '(trainX.shape[0], time_steps, features)'], {}), '(trainX, (trainX.shape[0], time_steps, features))\n', (1968, 2017), True, 'import numpy as np\n'), ((2027, 2084), 'numpy.reshape', 'np.reshape', (['testX', '(testX.shape[0], time_steps, features)'], {}), '(testX, (testX.shape[0], time_steps, features))\n', (2037, 2084), True, 'import numpy as np\n'), ((2334, 2411), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)', 'amsgrad': '(True)'}), '(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=True)\n', (2338, 2411), False, 'from keras.optimizers import Adam\n'), ((2434, 2446), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2444, 2446), False, 'from keras.models import Sequential\n'), ((3517, 3539), 'numpy.empty_like', 'np.empty_like', (['dataset'], {}), '(dataset)\n', (3530, 3539), True, 'import numpy as np\n'), ((3708, 3730), 'numpy.empty_like', 'np.empty_like', (['dataset'], {}), '(dataset)\n', (3721, 3730), True, 'import numpy as np\n'), ((3928, 3954), 'matplotlib.pyplot.plot', 'plt.plot', (['trainPredictPlot'], {}), '(trainPredictPlot)\n', (3936, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3956, 3981), 'matplotlib.pyplot.plot', 'plt.plot', (['testPredictPlot'], {}), '(testPredictPlot)\n', (3964, 3981), True, 'import matplotlib.pyplot as plt\n'), ((3983, 3993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3991, 3993), True, 'import matplotlib.pyplot as plt\n'), ((551, 562), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (560, 562), False, 'import os\n'), ((1578, 1593), 'numpy.array', 'np.array', (['dataX'], {}), '(dataX)\n', (1586, 1593), True, 'import numpy as np\n'), ((1607, 1622), 'numpy.array', 'np.array', (['dataY'], {}), '(dataY)\n', (1615, 1622), True, 'import numpy as np\n'), ((2458, 2525), 'keras.layers.LSTM', 'LSTM', (['lstm_neurons'], {'input_shape': 'input_shape', 'return_sequences': '(False)'}), '(lstm_neurons, input_shape=input_shape, return_sequences=False)\n', (2462, 2525), False, 'from keras.layers import LSTM\n'), ((2542, 2583), 'keras.layers.Dense', 'Dense', (['dense_neurons'], {'activation': '"""linear"""'}), "(dense_neurons, activation='linear')\n", (2547, 2583), False, 'from keras.layers import Dense\n'), ((3240, 3289), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['trainY[0]', 'trainPredict[:, 0]'], {}), '(trainY[0], trainPredict[:, 0])\n', (3258, 3289), False, 'from sklearn.metrics import mean_squared_error\n'), ((3361, 3408), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['testY[0]', 'testPredict[:, 0]'], {}), '(testY[0], testPredict[:, 0])\n', (3379, 3408), False, 'from sklearn.metrics import mean_squared_error\n')]
|
from django.contrib import admin
from .models import Thread, Reply
class ThreadAdmin(admin.ModelAdmin):
list_display = ['title', 'author', 'created', 'modifield']
search_fields = ['title', 'author__email', 'body']
prepopulated_fields = {'slug':('title',)}
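    # slug is auto-filled from the title while typing in the admin form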
class ReplyAdmin(admin.ModelAdmin):
list_display = ['thread', 'author', 'created', 'modifield']
search_fields = ['thread__title', 'author__email', 'reply']
admin.site.register(Thread,ThreadAdmin)
admin.site.register(Reply,ReplyAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((442, 482), 'django.contrib.admin.site.register', 'admin.site.register', (['Thread', 'ThreadAdmin'], {}), '(Thread, ThreadAdmin)\n', (461, 482), False, 'from django.contrib import admin\n'), ((482, 520), 'django.contrib.admin.site.register', 'admin.site.register', (['Reply', 'ReplyAdmin'], {}), '(Reply, ReplyAdmin)\n', (501, 520), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
import sys
import json
# add the working directory to sys.path so sibling modules such as utils can be imported
sys.path.append(".")
from utils import get
from digital_land_frontend.render import wkt_to_json_geometry
sample_file = "docs/brownfield-land/organisation/local-authority-eng/HAG/sites.json"
def create_feature_collection(features):
return {"type": "FeatureCollection", "features": features}
def create_feature(row):
feature = {"type": "Feature"}
feature["properties"] = row
if row["point"] is not None:
feature["geometry"] = wkt_to_json_geometry(row["point"])
return feature
def convert_json_to_geojson(data):
features = []
for row in data:
features.append(create_feature(row))
return create_feature_collection(features)
def test_convert(fn):
# if file local
with open(fn) as file:
data = json.load(file)
gjson = convert_json_to_geojson(data)
with open(
f"docs/brownfield-land/organisation/local-authority-eng/HAG/sites.geojson", "w"
) as file:
file.write(json.dumps(gjson))
|
[
"sys.path.append",
"json.load",
"digital_land_frontend.render.wkt_to_json_geometry",
"json.dumps"
] |
[((71, 91), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (86, 91), False, 'import sys\n'), ((525, 559), 'digital_land_frontend.render.wkt_to_json_geometry', 'wkt_to_json_geometry', (["row['point']"], {}), "(row['point'])\n", (545, 559), False, 'from digital_land_frontend.render import wkt_to_json_geometry\n'), ((834, 849), 'json.load', 'json.load', (['file'], {}), '(file)\n', (843, 849), False, 'import json\n'), ((1031, 1048), 'json.dumps', 'json.dumps', (['gjson'], {}), '(gjson)\n', (1041, 1048), False, 'import json\n')]
|
from bsbetl import ov_helpers
import logging
import math
from datetime import date, datetime
from numpy.core.numeric import NaN
from pandas.core.indexes.base import Index
import pandas as pd
from bsbetl.alltable_calcs import Calculation
from bsbetl.alltable_calcs.at_params import at_calc_params
from bsbetl.calc_helpers import between_dates_condition, get_row_index_from_daily_df, last_trading_row_index, single_day_condition
class _2StVols_SlowDailyVols(Calculation.Calculation):
def __init__(self):
super().__init__('SlowDailyVols')
self.description = 'Modified Daily Volume calculation'
self.dependents = ['DV'] # this column we assume exists
self.at_computeds = ['DaysDVup', 'SDVBsl', 'SDVBm', 'SDVBf',
'SDVCsl', 'SDVCm', 'SDVCf1', 'SDVCf2',
'DVFDf', 'DVFf1', 'DVFf2', 'DVFm', 'DVFsl'
]
self.ov_computeds = []
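        # Note on the formula comments in day_calculate below: they follow the source
        # document's notation, where a trailing "e..." name denotes the exponent
        # parameter applied to the bracketed difference (the ** terms in the code).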
def day_calculate(self, df: pd.DataFrame, share_num: str, idx: Index, prior: Index, top_up: bool, stage: int):
''' Implementation per Gunther's 210209 Calc Daily Vol Initial Stage.odt
Daily Vol 1. Make Slow Daily Vols:
        Calculates the 'computeds' of a single (daily) row of the df
'''
assert stage == 2, f'{self.name} calculation should only run at stage 2'
# df is assumed daily since stage 2 is asserted
# print(f'prior_idx={prior},idx={idx}')
curday_ordinal = df.index.tolist().index(idx[0])
#print(f'_2StVols_SlowDailyVols:day_calculate: curday_ordinal={curday_ordinal}')
# 1a) Slow Daily Vol Basic slow "SDVBsl":
#print(f"{idx[0]} DV= {df.at[idx[0], 'DV']}")
if (prior is None):
# first row
df.at[idx[0], 'DaysDVup'] = 0
# compute starting SlowVols figures by using average of 1st 5 days Volume
DV_avg = df.iloc[:5]['ODV'].mean(0)
df.at[idx[0],'SDVBsl'] = DV_avg
df.at[idx[0],'SDVBm'] = DV_avg
df.at[idx[0],'SDVBf'] = DV_avg
df.at[idx[0],'SDVCsl'] = DV_avg
df.at[idx[0],'SDVCm'] = DV_avg
df.at[idx[0],'SDVCf1'] = DV_avg
df.at[idx[0],'SDVCf2'] = DV_avg
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVBsl']:
# Case 1: DV D > SDVBsl D-1
# we're not on the very first row
# "DaysDVupD" is the number of days in a row the Slow Daily Vol Basic slow "SDVBsl D" increased.
up_till = between_dates_condition(df, df.index[0], prior[0])
up_tillDF = df[up_till]
#print(f"up_tillDF rows={up_tillDF.shape[0]} {df.index[0]} -> {prior[0]}")
if up_tillDF['SDVBsl'].is_monotonic_increasing:
# been increasing till this row, write the count in DaysDVup
#print(f'up_tilDF rows={up_tillDF.shape[0]}')
daysDVup = min(up_tillDF.shape[0], 50) # not more than 50
daysDVup = max(1, daysDVup) # not less than 1
df.at[idx[0], 'DaysDVup'] = daysDVup
else:
daysDVup = 1
df.at[idx[0], 'DaysDVup'] = daysDVup
# SDVB sl D = SDVBsl D-1 + YDVBsl u / DaysDVupD * ( DVD - SDVB sl D-1)eSDVBsl u
df.at[idx[0], 'SDVBsl'] = df.at[prior[0], 'SDVBsl'] + (at_calc_params['atp_YDVBslu']['setting']/daysDVup) * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVBsl']) ** at_calc_params['atp_eSDVBslu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVBsl']:
# Case 2: DVD < SDVBsl D-1
# SDVBsl D = SDVBsl D-1 - YDVBsl d * (SDVB sl D-1 - DVD)eSDVBsl d
df.at[idx[0], 'SDVBsl'] = df.at[prior[0], 'SDVBsl'] - at_calc_params['atp_YDVBsld']['setting'] * (
df.at[prior[0], 'SDVBsl']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVBsld']['setting']
# 1b) Slow Daily Vol Basic medium "SDVB m D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVBm']:
# Case 1: DVD > SDVBm D-1
# SDVBm D = SDVBm D-1 + YDVBm u * ( DVD - SDVBm D-1)eSDVBm u
df.at[idx[0], 'SDVBm'] = df.at[prior[0], 'SDVBm'] + at_calc_params['atp_YDVBmu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVBm']) ** at_calc_params['atp_eSDVBmu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVBm']:
# Case 2: DVD < SDVBm D-1
# SDVBm D = SDVBm D-1 - YDVB m d * (SDVBm D-1 - DVD)eSDVBm d
df.at[idx[0], 'SDVBm'] = df.at[prior[0], 'SDVBm'] - at_calc_params['atp_YDVBmd']['setting'] * (
df.at[prior[0], 'SDVBm']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVBmd']['setting']
# 1c) Slow Daily Vol Basic fast "SDVB bf D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVBf']:
# Case 1: DVD > SDVBf D-1
# SDVBf D = SDVBf D-1 + YDVBf u * ( DVD - SDVBf D-1)eSDVBf u
df.at[idx[0], 'SDVBf'] = df.at[prior[0], 'SDVBf'] + at_calc_params['atp_YDVBfu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVBf']) ** at_calc_params['atp_eSDVBfu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVBf']:
# Case 2: DVD < SDVBf D-1
# SDVBf D = SDVBf D-1 - YDVB f d * (SDVBf D-1 - DVD)eSDVBf d
df.at[idx[0], 'SDVBf'] = df.at[prior[0], 'SDVBf'] - at_calc_params['atp_YDVBfd']['setting'] * (
df.at[prior[0], 'SDVBf']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVBfd']['setting']
# 1d) Slow Daily Vol Compare slow "SDVCsl D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCsl']:
# Case 1: DVD > SDVCsl D-1
# SDVCsl D = SDVCsl D-1 + YDVCsl u * ( DVD - SDVCsl D-1)eSDVCsl u
df.at[idx[0], 'SDVCsl'] = df.at[prior[0], 'SDVCsl'] + at_calc_params['atp_YDVCslu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCsl']) ** at_calc_params['atp_eSDVCslu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCsl']:
# Case 2: DVD < SDVCsl D-1
# SDVCsl D = SDVCsl D-1 - YDVC sl d * (SDVCsl D-1 - DVD)eSDVCsl d
df.at[idx[0], 'SDVCsl'] = df.at[prior[0], 'SDVCsl'] - at_calc_params['atp_YDVCsld']['setting'] * (
df.at[prior[0], 'SDVCsl']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCsld']['setting']
# 1e) Slow Daily Vol Compare medium "SDVCm D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCm']:
# Case 1: DVD > SDVCm D-1
# SDVCm D = SDVCm D-1 + YDVCm u * ( DVD - SDVCm D-1)eSDVCm u
df.at[idx[0], 'SDVCm'] = df.at[prior[0], 'SDVCm'] + at_calc_params['atp_YDVCmu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCm']) ** at_calc_params['atp_eSDVCmu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCm']:
# Case 2: DVD < SDVCm D-1
# SDVCm D = SDVCm D-1 - YDVC m d * (SDVCm D-1 - DVD)eSDVCm d
df.at[idx[0], 'SDVCm'] = df.at[prior[0], 'SDVCm'] - at_calc_params['atp_YDVCmd']['setting'] * (
df.at[prior[0], 'SDVCm']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCmd']['setting']
# 1f) Slow Daily Vol Compare fast1 "SDVCf1 D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCf1']:
# Case 1: DVD > SDVCf1 D-1
# SDVCm D = SDVCf1 D-1 + YDVCf1 u * ( DVD - SDVCf1 D-1)eSDVCf1 u
df.at[idx[0], 'SDVCf1'] = df.at[prior[0], 'SDVCf1'] + at_calc_params['atp_YDVCf1u']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCf1']) ** at_calc_params['atp_eSDVCf1u']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCf1']:
# Case 2: DVD < SDVCf1 D-1
# SDVCm D = SDVCf1 D-1 - YDVC f1 d * (SDVCf1 D-1 - DVD)eSDVCf1 d
df.at[idx[0], 'SDVCf1'] = df.at[prior[0], 'SDVCf1'] - at_calc_params['atp_YDVCf1d']['setting'] * (
df.at[prior[0], 'SDVCf1']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCf1d']['setting']
# 1g) Slow Daily Vol Compare fast1 "SDVCf2 D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCf2']:
# Case 1: DVD > SDVCf2 D-1
# SDVCf2 D = SDVCf2 D-1 + YDVCf2 u * ( DVD - SDVCf2 D-1)eSDVCf2 u
df.at[idx[0], 'SDVCf2'] = df.at[prior[0], 'SDVCf2'] + at_calc_params['atp_YDVCf2u']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCf2']) ** at_calc_params['atp_eSDVCf2u']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCf2']:
# Case 2: DVD < SDVCf2 D-1
# SDVCf2 D = SDVCf2 D-1 - YDVC f2 d * (SDVCf2 D-1 - DVD)eSDVCf2 d
df.at[idx[0], 'SDVCf2'] = df.at[prior[0], 'SDVCf2'] - at_calc_params['atp_YDVCf2d']['setting'] * (
df.at[prior[0], 'SDVCf2']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCf2d']['setting']
# 1h) As in the old ShW, we need figures to show a volumes constellation, the Daily Vols Figure, "DVFxx"
if not prior is None:
# 'DVFDf' ???
# df.at[idx[0], 'DVFDf'] = df.at[idx[0], 'DV'] / df.at[idx[0], 'SDVBf']
# 'DVFf3'
if curday_ordinal >= 1:
_1_back=df.index[curday_ordinal-1]
df.at[idx[0], 'DVFf3'] = df.at[idx[0], 'DV'] / df.at[_1_back, 'SDVBf']
# 'DVFf2'
if curday_ordinal >= 2:
_2_back=df.index[curday_ordinal-2]
df.at[idx[0], 'DVFf2'] = df.at[idx[0], 'SDVCf2'] / df.at[_2_back, 'SDVBf']
# 'DVFf1'
if curday_ordinal >= 3:
_3_back=df.index[curday_ordinal-3]
df.at[idx[0], 'DVFf1'] = df.at[idx[0], 'SDVCf1'] / df.at[_3_back, 'SDVBf']
# 'DVFm'
df.at[idx[0], 'DVFm'] = df.at[idx[0], 'SDVCm'] / df.at[idx[0], 'SDVBm']
# 'DVFsl'
df.at[idx[0], 'DVFsl'] = df.at[idx[0], 'SDVCsl'] / df.at[idx[0], 'SDVBsl']
''' additional calcs performed AFTER day by day operations '''
def wrap_up(self, df, share_num, calc_dates_in_df, top_up, stage):
assert stage == 2, f'{self.name} wrap_up calculation should only run at stage 2'
# assign into Ov SDVBf.D-1, and SDVBf.D-2
try:
ov_helpers.global_ov_update(share_num, 'SDVBf.D-1', df.loc[df.index[-2],'SDVBf'])
ov_helpers.global_ov_update(share_num, 'SDVBf.D-2', df.loc[df.index[-3],'SDVBf'])
except IndexError as exc:
logging.error(f'_2StVols_SlowDailyVols wrap_up exception {exc}')
return
|
[
"logging.error",
"bsbetl.ov_helpers.global_ov_update",
"bsbetl.calc_helpers.between_dates_condition"
] |
[((10764, 10850), 'bsbetl.ov_helpers.global_ov_update', 'ov_helpers.global_ov_update', (['share_num', '"""SDVBf.D-1"""', "df.loc[df.index[-2], 'SDVBf']"], {}), "(share_num, 'SDVBf.D-1', df.loc[df.index[-2],\n 'SDVBf'])\n", (10791, 10850), False, 'from bsbetl import ov_helpers\n'), ((10858, 10944), 'bsbetl.ov_helpers.global_ov_update', 'ov_helpers.global_ov_update', (['share_num', '"""SDVBf.D-2"""', "df.loc[df.index[-3], 'SDVBf']"], {}), "(share_num, 'SDVBf.D-2', df.loc[df.index[-3],\n 'SDVBf'])\n", (10885, 10944), False, 'from bsbetl import ov_helpers\n'), ((2523, 2573), 'bsbetl.calc_helpers.between_dates_condition', 'between_dates_condition', (['df', 'df.index[0]', 'prior[0]'], {}), '(df, df.index[0], prior[0])\n', (2546, 2573), False, 'from bsbetl.calc_helpers import between_dates_condition, get_row_index_from_daily_df, last_trading_row_index, single_day_condition\n'), ((10986, 11050), 'logging.error', 'logging.error', (['f"""_2StVols_SlowDailyVols wrap_up exception {exc}"""'], {}), "(f'_2StVols_SlowDailyVols wrap_up exception {exc}')\n", (10999, 11050), False, 'import logging\n')]
|
__all__ = [
"make_mrcnn",
"mrcnn",
]
import torch
from torchvision.models.detection import MaskRCNN, maskrcnn_resnet50_fpn
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.transform import GeneralizedRCNNTransform
def make_mrcnn():
model = maskrcnn_resnet50_fpn(
num_classes=2, pretrained_backbone=True, trainable_backbone_layers=5
)
transform = GeneralizedRCNNTransform(
min_size=800, max_size=1333, image_mean=[0], image_std=[1]
)
model.transform = transform
model.backbone.body.conv1 = torch.nn.Conv2d(
1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
)
return model
def mrcnn():
# Get a resnet50 fpn backbone and change the first layer for grayscale
backbone = resnet_fpn_backbone("resnet50", pretrained=True, trainable_layers=5)
backbone.body.conv1 = torch.nn.Conv2d(
1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
)
# Make anchor generator with 3 sizes per feature map and 5 aspect ratios
sizes = tuple(2.0 ** x for x in range(5, 12))
aspects = tuple(0.5 * x for x in range(1, 5))
n_feature_maps = 5 # true for resnet50 with FPN
ag_sizes = tuple(tuple(sizes[i : i + 3]) for i in range(n_feature_maps))
ag_aspects = n_feature_maps * (aspects,)
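    # one 3-tuple of anchor sizes per FPN level, with the same aspect ratios repeated
    # for every level, since AnchorGenerator expects one entry per feature map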
anchor_generator = AnchorGenerator(sizes=ag_sizes, aspect_ratios=ag_aspects)
# Assemble into MaskRCNN
mrcnn = MaskRCNN(
backbone,
2,
image_mean=[0],
image_std=[1],
rpn_anchor_generator=anchor_generator,
)
return mrcnn
|
[
"torch.nn.Conv2d",
"torchvision.models.detection.rpn.AnchorGenerator",
"torchvision.models.detection.maskrcnn_resnet50_fpn",
"torchvision.models.detection.transform.GeneralizedRCNNTransform",
"torchvision.models.detection.backbone_utils.resnet_fpn_backbone",
"torchvision.models.detection.MaskRCNN"
] |
[((377, 472), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'maskrcnn_resnet50_fpn', ([], {'num_classes': '(2)', 'pretrained_backbone': '(True)', 'trainable_backbone_layers': '(5)'}), '(num_classes=2, pretrained_backbone=True,\n trainable_backbone_layers=5)\n', (398, 472), False, 'from torchvision.models.detection import MaskRCNN, maskrcnn_resnet50_fpn\n'), ((499, 587), 'torchvision.models.detection.transform.GeneralizedRCNNTransform', 'GeneralizedRCNNTransform', ([], {'min_size': '(800)', 'max_size': '(1333)', 'image_mean': '[0]', 'image_std': '[1]'}), '(min_size=800, max_size=1333, image_mean=[0],\n image_std=[1])\n', (523, 587), False, 'from torchvision.models.detection.transform import GeneralizedRCNNTransform\n'), ((662, 751), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(64)'], {'kernel_size': '(7, 7)', 'stride': '(2, 2)', 'padding': '(3, 3)', 'bias': '(False)'}), '(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3),\n bias=False)\n', (677, 751), False, 'import torch\n'), ((885, 953), 'torchvision.models.detection.backbone_utils.resnet_fpn_backbone', 'resnet_fpn_backbone', (['"""resnet50"""'], {'pretrained': '(True)', 'trainable_layers': '(5)'}), "('resnet50', pretrained=True, trainable_layers=5)\n", (904, 953), False, 'from torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n'), ((980, 1069), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(64)'], {'kernel_size': '(7, 7)', 'stride': '(2, 2)', 'padding': '(3, 3)', 'bias': '(False)'}), '(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3),\n bias=False)\n', (995, 1069), False, 'import torch\n'), ((1456, 1513), 'torchvision.models.detection.rpn.AnchorGenerator', 'AnchorGenerator', ([], {'sizes': 'ag_sizes', 'aspect_ratios': 'ag_aspects'}), '(sizes=ag_sizes, aspect_ratios=ag_aspects)\n', (1471, 1513), False, 'from torchvision.models.detection.rpn import AnchorGenerator\n'), ((1556, 1652), 'torchvision.models.detection.MaskRCNN', 'MaskRCNN', (['backbone', '(2)'], {'image_mean': '[0]', 'image_std': '[1]', 'rpn_anchor_generator': 'anchor_generator'}), '(backbone, 2, image_mean=[0], image_std=[1], rpn_anchor_generator=\n anchor_generator)\n', (1564, 1652), False, 'from torchvision.models.detection import MaskRCNN, maskrcnn_resnet50_fpn\n')]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import threading
mu = threading.Lock()
def create_sql_file():
open('sql.txt', 'w+', encoding='utf-8')
def lock_test(sql):
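    # serialise writers: mu.acquire(True) blocks until the lock is free, so only one
    # thread at a time appends to sql.txt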
if mu.acquire(True):
write_to_file(sql)
mu.release()
def write_to_file(sql):
fp = open('sql.txt', 'a+')
print('write start!')
try:
fp.write(sql)
finally:
fp.close()
print('write finish!')
def read_sql_file():
fp = open('sql.txt', 'r+')
return fp.read()
|
[
"threading.Lock"
] |
[((68, 84), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (82, 84), False, 'import threading\n')]
|
"""Outgoing SMS API."""
import logging
import pkg_resources
from pyramid.renderers import render
from pyramid.settings import asbool
from pyramid_sms.utils import get_sms_backend
try:
pkg_resources.get_distribution('websauna')
from websauna.system.http import Request
from websauna.system.task.tasks import task
from websauna.system.task.tasks import ScheduleOnCommitTask
    HAS_WEBSAUNA = True
except pkg_resources.DistributionNotFound:
from pyramid.request import Request
HAS_WEBSAUNA = False
from .interfaces import SMSConfigurationError
from .events import SMSSent
logger = logging.getLogger(__name__)
def _send_sms(request, receiver, text_body, sender, log_failure):
"""Perform actual SMS outbound operation through a configured service."""
service = get_sms_backend(request)
service.send_sms(receiver, text_body, sender, log_failure)
if HAS_WEBSAUNA:
# TODO: Factor this to a separate configurable module
@task(base=ScheduleOnCommitTask, bind=True)
def _send_sms_async(self, receiver, from_, text_body, log_failure):
"""Celery task to send the SMS synchronously outside HTTP request proccesing."""
request = self.request.request
_send_sms(request, receiver, from_, text_body, log_failure)
def send_sms(request: Request, receiver: str, text_body: str, sender: str=None, log_failure: bool=True, _async: bool=None, user_dialog: bool=False):
"""Send outgoing SMS message using the default configured SMS service.
Example:
.. code-block:: python
def test_sms_view(request):
'''Dummy view to simulate outgoing SMS.'''
send_sms(request, "+15551231234", "Test message")
:param receiver: Receiver's phone number as international format. You should normalize this number from all user input before passing in. See :py:mod:`pyramid_sms.utils` for examples.
:param text_body: Outbound SMS body. Usually up to 1600 characters.
:param sender: Envelope from number. Needs to be configured in the service. If none use default configured "sms.default_from".
:param log_failure: If there is an exception from the SMS backend then log this using Python logging system. Otherwise raise the error as an exception.
    :param _async: Force asynchronous operation through task subsystem. If ``None`` respect ``sms.async`` settings. If the operation is asynchronous, this function returns instantly and does not block the HTTP request due to slow API calls to a third-party service.
:param user_dialog: This SMS is part of a dialog with a known user. Use this flag to log messages with the user in your conversation dashboard. Set ``False`` to two-factor auth tokens and such.
:raise SMSConfigurationError: If configuration settings are missing
"""
if _async is None:
_async = request.registry.settings.get("sms.async")
if _async is None:
raise SMSConfigurationError("sms.async setting not defined")
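    # the setting arrives as a string ("true"/"false"); asbool normalises it to a boolean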
_async = asbool(_async)
if sender is None:
sender = request.registry.settings.get("sms.default_sender")
if not sender:
raise SMSConfigurationError("sms.default_sender not configured")
# https://www.twilio.com/help/faq/sms/does-twilio-support-concatenated-sms-messages-or-messages-over-160-characters
if len(text_body) >= 1600:
logger.warn("Too long SMS: %s", text_body)
logger.info("Queuing sending SMS to: %s, body: %s", receiver, text_body)
# Put the actual Twilio operation async queue
if _async:
if not HAS_WEBSAUNA:
raise SMSConfigurationError("Async operations are only supported with Websauna framework")
_send_sms_async.apply_async(args=(receiver, text_body, sender, log_failure,))
else:
_send_sms(request, receiver, text_body, sender, log_failure)
request.registry.notify(SMSSent(request, receiver, text_body, sender, user_dialog))
def send_templated_sms(request: Request, template: str, context: dict, receiver: str, sender: str=None, log_failure: bool=True, _async: bool=None, user_dialog: bool=False):
"""Send out a SMS that is constructed using a page template.
Same as :py:meth:`pyramid_sms.outgoing.send_sms`, but uses templates instead of hardcoded messages.
:param request: HTTP request
:param template: Template name. Like ``welcome_sms.txt.jinja``.
:param context: Dictionary passed to template rendering engine
"""
text_body = render(template, context, request=request)
send_sms(request, receiver, text_body, sender, log_failure, _async, user_dialog)
|
[
"pkg_resources.get_distribution",
"websauna.system.task.tasks.task",
"pyramid_sms.utils.get_sms_backend",
"pyramid.settings.asbool",
"pyramid.renderers.render",
"logging.getLogger"
] |
[((610, 637), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (627, 637), False, 'import logging\n'), ((191, 233), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""websauna"""'], {}), "('websauna')\n", (221, 233), False, 'import pkg_resources\n'), ((798, 822), 'pyramid_sms.utils.get_sms_backend', 'get_sms_backend', (['request'], {}), '(request)\n', (813, 822), False, 'from pyramid_sms.utils import get_sms_backend\n'), ((968, 1010), 'websauna.system.task.tasks.task', 'task', ([], {'base': 'ScheduleOnCommitTask', 'bind': '(True)'}), '(base=ScheduleOnCommitTask, bind=True)\n', (972, 1010), False, 'from websauna.system.task.tasks import task\n'), ((4468, 4510), 'pyramid.renderers.render', 'render', (['template', 'context'], {'request': 'request'}), '(template, context, request=request)\n', (4474, 4510), False, 'from pyramid.renderers import render\n'), ((2989, 3003), 'pyramid.settings.asbool', 'asbool', (['_async'], {}), '(_async)\n', (2995, 3003), False, 'from pyramid.settings import asbool\n')]
|
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import kornia
from .voxelmorph2d import VxmDense,NCC,Grad,Dice
from monai.losses import BendingEnergyLoss,GlobalMutualInformationLoss,DiceLoss,LocalNormalizedCrossCorrelationLoss
from kornia.filters import sobel, gaussian_blur2d,canny,spatial_gradient
class LabelProp(pl.LightningModule):
@property
def automatic_optimization(self):
return False
def norm(self, x):
if len(x.shape)==4:
x = kornia.enhance.normalize_min_max(x)
elif len(x.shape)==3:
x= kornia.enhance.normalize_min_max(x[:, None, ...])[:,0, ...]
else:
x = kornia.enhance.normalize_min_max(x[None, None, ...])[0, 0, ...]
return x
def __init__(self,n_channels=1,n_classes=2,learning_rate=5e-3,weight_decay=1e-8,way='up',shape=256,selected_slices=None,losses={},by_composition=False):
super().__init__()
self.n_classes = n_classes
self.learning_rate=learning_rate
self.weight_decay=weight_decay
self.selected_slices=selected_slices #Used in validation step
if isinstance(shape,int):shape=[shape,shape]
self.registrator= VxmDense(shape,bidir=False,int_downsize=1,int_steps=7)
self.way=way #If up, learning only "forward" transitions (phi_i->j with j>i). Other choices : "down", "both". Bet you understood ;)
self.by_composition=by_composition
self.loss_model = MTL_loss(['sim','seg','comp','smooth'])
self.losses=losses
if self.by_composition: print('Using composition for training')
print('Losses',losses)
self.save_hyperparameters()
def apply_deform(self,x,field):
"""Apply deformation to x from flow field
Args:
x (Tensor): Image or mask to deform (BxCxHxW)
field (Tensor): Deformation field (Bx2xHxW)
Returns:
Tensor: Transformed image
"""
return self.registrator.transformer(x,field)
def compose_list(self,flows):
flows=list(flows)
compo=flows[-1]
for flow in reversed(flows[:-1]):
compo=self.compose_deformation(flow,compo)
return compo
def compose_deformation(self,flow_i_k,flow_k_j):
""" Returns flow_k_j(flow_i_k(.)) flow
Args:
flow_i_k
flow_k_j
Returns:
[Tensor]: Flow field flow_i_j = flow_k_j(flow_i_k(.))
"""
flow_i_j= flow_k_j+self.apply_deform(flow_i_k,flow_k_j)
return flow_i_j
def forward(self, moving,target,registration=True):
"""
Args:
moving (Tensor): Moving image (BxCxHxW)
target ([type]): Fixed image (BxCxHxW)
            registration (bool, optional): If False, also return non-integrated inverse flow field. Else return the integrated one. Defaults to True.
Returns:
moved (Tensor): Moved image
field (Tensor): Deformation field from moving to target
"""
return self.registrator.forward(moving,target,registration=registration)
# def multi_level_training(self,moving,target,level=3):
# """
# Args:
# moving (Tensor): Moving image (BxCxHxW)
# target ([type]): Fixed image (BxCxHxW)
# registration (bool, optional): If False, also return non-integrated inverse flow field. Else return the integrated one. Defaults to False.
# Returns:
# moved (Tensor): Moved image
# field (Tensor): Deformation field from moving to target
# """
# stack_moved=[]
# stack_field=[]
# stack_preint=[]
# resampling=torch.nn.Upsample(size=self.shape,mode='bilinear',align_corners=True)
# for i in range(level):
# downsampling=nn.Upsample(scale_factor=1/(i+1), mode='bilinear',align_corners=True)
# downsampled_moving=downsampling(moving)
# downsampled_target=downsampling(target)
# moved,field,preint_field=self.forward(downsampled_moving,downsampled_target)
# self.compute_loss(moved,target,field=field)
# stack_moved.append(moved)
# stack_field.append(field)
# stack_preint.append(preint_field)
# return torch.stack(stack_moved,0).mean(0),torch.stack(stack_field,0).mean(0),torch.stack(stack_preint,0).mean(0)
def compute_loss(self,moved=None,target=None,moved_mask=None,target_mask=None,field=None):
"""
Args:
moved : Transformed anatomical image
target : Target anatomical image
moved_mask : Transformed mask
target_mask : Target mask
field : Velocity field (=non integrated)
"""
losses={}
if moved!=None:
# max_peak=F.conv2d(target,target).sum()
# loss_ncc=-F.conv2d(moved,target).sum()/max_peak#+NCC().loss(moved,target)
# loss_ncc=NCC().loss(moved,target)
loss_ncc=GlobalMutualInformationLoss()(moved,target)*0.8 #MONAI
# loss_ncc=LocalNormalizedCrossCorrelationLoss(spatial_dims=2, kernel_size=99)(moved,target) #MONAI
# loss_ncc=nn.MSELoss()(moved,target)
losses['sim']=loss_ncc
if moved_mask!=None:
# loss_seg= Dice().loss(moved_mask,target_mask)
loss_seg=DiceLoss(include_background=False)(moved_mask,target_mask)-1
losses['seg']=loss_seg
if field!=None:
# loss_trans=BendingEnergyLoss()(field) #MONAI
loss_trans=Grad().loss(field,field)
losses['smooth']=loss_trans
#Return dict of losses
return losses#{'sim': loss_ncc,'seg':loss_seg,'smooth':loss_trans}
def compute_contour_loss(self,img,moved_mask):
#Compute contour loss
mag,mask_contour=canny(moved_mask[:,1:2])
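        # kornia's canny returns (magnitude, edges); only the contour magnitude of the
        # propagated foreground channel is penalised below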
# edges,mag=canny(img)
return BendingEnergyLoss()(mag)
def weighting_loss(self,losses):
"""
Args:
losses (dict): Dictionary of losses
Returns:
loss (Tensor): Weighted loss
"""
def blend(self,x,y):
#For visualization
x=self.norm(x)
blended=torch.stack([y,x,x])
return blended
def training_step(self, batch, batch_nb):
X,Y=batch # X : Full scan (1x1xLxHxW) | Y : Ground truth (1xCxLxHxW)
y_opt=self.optimizers()
dices_prop=[]
Y_multi_lab=torch.clone(Y)
for lab in list(range(Y_multi_lab.shape[1]))[1:]:
chunks=[]
chunk=[]
#Binarize ground truth according to the label
Y=torch.stack([1-Y_multi_lab[:,lab],Y_multi_lab[:,lab]],dim=1)
#Identifying chunks (i->j)
for i in range(X.shape[2]):
y=Y[:,:,i,...]
if len(torch.unique(torch.argmax(y,1)))>1:
chunk.append(i)
if len(chunk)==2:
chunks.append(chunk)
chunk=[i]
if self.current_epoch==0:
print(lab,chunks)
for chunk in chunks:
y_opt.zero_grad()
#Sequences of flow fields (field_up=forward, field_down=backward)
fields_up=[]
fields_down=[]
loss_up_sim=[]
loss_up_smooth=[]
loss_down_sim=[]
loss_down_smooth=[]
loss=0
losses={'sim':None,'seg':None,'comp':None,'smooth':None}
for i in range(chunk[0],chunk[1]):
#Computing flow fields and loss for each hop from chunk[0] to chunk[1]
x1=X[:,:,i,...]
x2=X[:,:,i+1,...]
if not self.way=='down':
moved_x1,field_up,preint_field=self.forward(x1,x2,registration=False)
cur_loss=self.compute_loss(moved_x1,x2,field=preint_field)
loss_up_sim.append(cur_loss['sim'])
loss_up_smooth.append(cur_loss['smooth'])
# field_down=self.registrator.integrate(-preint_field)
# moved_x2=self.registrator.transformer(x2,field_down)
# loss_up_sim.append(self.compute_loss(moved_x2,x1)['sim'])
fields_up.append(field_up)
# if len(fields_up)>0:
# field_up_2=self.compose_deformation(fields_up[-1],field_up)
# loss_up.append(self.compute_loss(self.apply_deform(X[:,:,i-1],field_up_2),x2))
if not self.way=='up':
moved_x2,field_down,preint_field=self.forward(x2,x1,registration=False)#
fields_down.append(field_down)
moved_x2=self.registrator.transformer(x2,field_down)
cur_loss=self.compute_loss(moved_x2,x1,field=preint_field)
loss_down_sim.append(cur_loss['sim'])
loss_down_smooth.append(cur_loss['smooth'])
# field_up=self.registrator.integrate(-preint_field)
# moved_x1=self.registrator.transformer(x1,field_up)
# loss_down_sim.append(self.compute_loss(moved_x1,x2)['sim'])
# if len(fields_down)>0:
# field_down_2=self.compose_deformation(fields_down[-1],field_down)
# loss_down.append(self.compute_loss(self.apply_deform(X[:,:,i+1],field_down_2),x1))
#Better with mean
if self.way=='up':
loss=torch.stack(loss_up).mean()
elif self.way=='down':
loss=torch.stack(loss_down).mean()
else:
losses['sim']=torch.stack(loss_up_sim).mean()+torch.stack(loss_down_sim).mean()
losses['smooth']=torch.stack(loss_up_smooth).mean()+torch.stack(loss_down_smooth).mean()
# loss=(loss_up+loss_down)
# Computing registration from the sequence of flow fields
if not self.way=='down':
prop_x_up=X[:,:,chunk[0],...]
prop_y_up=Y[:,:,chunk[0],...]
composed_fields_up=self.compose_list(fields_up)
if self.by_composition:
prop_x_up=self.apply_deform(prop_x_up,composed_fields_up)
prop_y_up=self.apply_deform(prop_y_up,composed_fields_up)
else:
for i,field_up in enumerate(fields_up):
prop_x_up=self.apply_deform(prop_x_up,field_up)
prop_y_up=self.apply_deform(prop_y_up,field_up)
losses['contours']=self.compute_contour_loss(X[:,:,chunk[0]+i+1],prop_y_up)
if self.losses['compo-reg-up']:
losses['comp']=self.compute_loss(prop_x_up,X[:,:,chunk[1],...])['sim']
if self.losses['compo-dice-up']:
dice_loss=self.compute_loss(moved_mask=prop_y_up,target_mask=Y[:,:,chunk[1],...])['seg']
losses['seg']=dice_loss
dices_prop.append(dice_loss)
if not self.way=='up':
prop_x_down=X[:,:,chunk[1],...]
prop_y_down=Y[:,:,chunk[1],...]
composed_fields_down=self.compose_list(fields_down[::-1])
if self.by_composition:
prop_x_down=self.apply_deform(prop_x_down,composed_fields_down)
prop_y_down=self.apply_deform(prop_y_down,composed_fields_down)
else:
i=1
for field_down in reversed(fields_down):
prop_x_down=self.apply_deform(prop_x_down,field_down)
prop_y_down=self.apply_deform(prop_y_down,field_down)
losses['contours']+=self.compute_contour_loss(X[:,:,chunk[1]-i],prop_y_down)
i+=1
if self.losses['compo-reg-down']:
losses['comp']+=self.compute_loss(prop_x_down,X[:,:,chunk[0],...])['sim']
if self.losses['compo-dice-down']:
dice_loss=self.compute_loss(moved_mask=prop_y_down,target_mask=Y[:,:,chunk[0],...])['seg']
losses['seg']+=dice_loss
dices_prop.append(dice_loss)
            #Additional loss to ensure sequences (images and masks) generated from "positive" and "negative" flows are equal
# if self.way=='both':
# #This helps
# if self.losses['bidir-cons-dice']:
# loss+=self.compute_loss(moved_mask=prop_y_down,target_mask=prop_y_up)
# #This breaks stuff
# if self.losses['bidir-cons-reg']:
# loss+=self.compute_loss(prop_x_up,prop_x_down)
# loss+=nn.L1Loss()(self.apply_deform(X[:,:,chunk[0],...], self.compose_deformation(composed_fields_up,composed_fields_down)),X[:,:,chunk[0],...])
# loss+=nn.L1Loss()(self.apply_deform(X[:,:,chunk[1],...], self.compose_deformation(composed_fields_down,composed_fields_up)),X[:,:,chunk[1],...])
loss=losses['seg']+losses['sim']+losses['contours']#+losses['smooth']#torch.stack([v for v in losses.values()]).mean()
# loss=self.loss_model(losses)
self.log_dict({'loss':loss},prog_bar=True)
self.manual_backward(loss)
y_opt.step()
# self.logger.experiment.add_image('x_true',X[0,:,chunk[0],...])
# self.logger.experiment.add_image('prop_x_down',prop_x_down[0,:,0,...])
# self.logger.experiment.add_image('x_true_f',X[0,:,chunk[1],...])
# self.logger.experiment.add_image('prop_x_up',prop_x_up[0,:,-1,...])
if len(dices_prop)>0:
dices_prop=-torch.stack(dices_prop).mean()
self.log('val_accuracy',dices_prop)
print(dices_prop)
else:
self.log('val_accuracy',self.current_epoch)
return loss
def register_images(self,moving,target,moving_mask):
moved,field=self.forward(moving,target,registration=True)
return moved,self.apply_deform(moving_mask,field),field
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay,amsgrad=True)
def hardmax(self,Y,dim):
return torch.moveaxis(F.one_hot(torch.argmax(Y,dim),self.n_classes), -1, dim)
class MTL_loss(torch.nn.Module):
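    # Learns one sigma per loss term and combines them as
    # 0.5 * loss_k / sigma_k**2 + log(sigma_k), in the spirit of
    # homoscedastic-uncertainty multi-task weighting (Kendall et al.).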
def __init__(self, losses):
super().__init__()
start=1.
self.lw={}
self.sigmas = nn.ParameterDict()
for k in losses:
self.lw[k]= start
self.set_dict(self.lw)
def set_dict(self, dic):
self.lw = dic
for k in dic.keys():
if dic[k] > 0:
self.sigmas[k] = nn.Parameter(torch.ones(1) * dic[k])
def forward(self, loss_dict):
loss = 0
with torch.set_grad_enabled(True):
for k in loss_dict.keys():
if k in self.lw.keys():
loss +=0.5 * loss_dict[k] / (self.sigmas[k])**2 + torch.log(self.sigmas[k])
return loss
|
[
"torch.ones",
"torch.stack",
"kornia.filters.canny",
"kornia.enhance.normalize_min_max",
"torch.log",
"torch.argmax",
"torch.nn.ParameterDict",
"monai.losses.BendingEnergyLoss",
"monai.losses.GlobalMutualInformationLoss",
"torch.set_grad_enabled",
"monai.losses.DiceLoss",
"torch.clone"
] |
[((5969, 5994), 'kornia.filters.canny', 'canny', (['moved_mask[:, 1:2]'], {}), '(moved_mask[:, 1:2])\n', (5974, 5994), False, 'from kornia.filters import sobel, gaussian_blur2d, canny, spatial_gradient\n'), ((6356, 6378), 'torch.stack', 'torch.stack', (['[y, x, x]'], {}), '([y, x, x])\n', (6367, 6378), False, 'import torch\n'), ((6598, 6612), 'torch.clone', 'torch.clone', (['Y'], {}), '(Y)\n', (6609, 6612), False, 'import torch\n'), ((15208, 15226), 'torch.nn.ParameterDict', 'nn.ParameterDict', ([], {}), '()\n', (15224, 15226), False, 'from torch import nn\n'), ((536, 571), 'kornia.enhance.normalize_min_max', 'kornia.enhance.normalize_min_max', (['x'], {}), '(x)\n', (568, 571), False, 'import kornia\n'), ((6040, 6059), 'monai.losses.BendingEnergyLoss', 'BendingEnergyLoss', ([], {}), '()\n', (6057, 6059), False, 'from monai.losses import BendingEnergyLoss, GlobalMutualInformationLoss, DiceLoss, LocalNormalizedCrossCorrelationLoss\n'), ((6786, 6852), 'torch.stack', 'torch.stack', (['[1 - Y_multi_lab[:, lab], Y_multi_lab[:, lab]]'], {'dim': '(1)'}), '([1 - Y_multi_lab[:, lab], Y_multi_lab[:, lab]], dim=1)\n', (6797, 6852), False, 'import torch\n'), ((15558, 15586), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (15580, 15586), False, 'import torch\n'), ((15010, 15030), 'torch.argmax', 'torch.argmax', (['Y', 'dim'], {}), '(Y, dim)\n', (15022, 15030), False, 'import torch\n'), ((617, 666), 'kornia.enhance.normalize_min_max', 'kornia.enhance.normalize_min_max', (['x[:, None, ...]'], {}), '(x[:, None, ...])\n', (649, 666), False, 'import kornia\n'), ((707, 759), 'kornia.enhance.normalize_min_max', 'kornia.enhance.normalize_min_max', (['x[None, None, ...]'], {}), '(x[None, None, ...])\n', (739, 759), False, 'import kornia\n'), ((5126, 5155), 'monai.losses.GlobalMutualInformationLoss', 'GlobalMutualInformationLoss', ([], {}), '()\n', (5153, 5155), False, 'from monai.losses import BendingEnergyLoss, GlobalMutualInformationLoss, DiceLoss, LocalNormalizedCrossCorrelationLoss\n'), ((5489, 5523), 'monai.losses.DiceLoss', 'DiceLoss', ([], {'include_background': '(False)'}), '(include_background=False)\n', (5497, 5523), False, 'from monai.losses import BendingEnergyLoss, GlobalMutualInformationLoss, DiceLoss, LocalNormalizedCrossCorrelationLoss\n'), ((14397, 14420), 'torch.stack', 'torch.stack', (['dices_prop'], {}), '(dices_prop)\n', (14408, 14420), False, 'import torch\n'), ((15469, 15482), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (15479, 15482), False, 'import torch\n'), ((15737, 15762), 'torch.log', 'torch.log', (['self.sigmas[k]'], {}), '(self.sigmas[k])\n', (15746, 15762), False, 'import torch\n'), ((6993, 7011), 'torch.argmax', 'torch.argmax', (['y', '(1)'], {}), '(y, 1)\n', (7005, 7011), False, 'import torch\n'), ((9895, 9915), 'torch.stack', 'torch.stack', (['loss_up'], {}), '(loss_up)\n', (9906, 9915), False, 'import torch\n'), ((9987, 10009), 'torch.stack', 'torch.stack', (['loss_down'], {}), '(loss_down)\n', (9998, 10009), False, 'import torch\n'), ((10073, 10097), 'torch.stack', 'torch.stack', (['loss_up_sim'], {}), '(loss_up_sim)\n', (10084, 10097), False, 'import torch\n'), ((10105, 10131), 'torch.stack', 'torch.stack', (['loss_down_sim'], {}), '(loss_down_sim)\n', (10116, 10131), False, 'import torch\n'), ((10176, 10203), 'torch.stack', 'torch.stack', (['loss_up_smooth'], {}), '(loss_up_smooth)\n', (10187, 10203), False, 'import torch\n'), ((10211, 10240), 'torch.stack', 'torch.stack', (['loss_down_smooth'], {}), '(loss_down_smooth)\n', (10222, 
10240), False, 'import torch\n')]
|
# This file is part of GenMap and released under the MIT License, see LICENSE.
# Author: <NAME>
from EvalBase import EvalBase
import networkx as nx
import os
import signal
import math
main_pid = os.getpid()
class MapHeightEval(EvalBase):
def __init__(self):
pass
@staticmethod
def eval(CGRA, app, sim_params, individual, **info):
"""Return mapping height.
Args:
CGRA (PEArrayModel): A model of the CGRA
app (Application): An application to be optimized
sim_params (SimParameters): parameters for some simulations
individual (Individual): An individual to be evaluated
Returns:
int: mapping height
"""
y_coords = []
SEs = [v for v in individual.routed_graph.nodes() if CGRA.isSE(v)]
ALUs = [v for v in individual.routed_graph.nodes() if CGRA.isALU(v)]
width, height = CGRA.getSize()
for node in SEs + ALUs:
for x in range(width):
for y in range(height):
rsc = CGRA.get_PE_resources((x, y))
if node in [v for se_set in rsc["SE"].values() for v in se_set ] or \
node == rsc["ALU"]:
y_coords.append(y)
break
map_height = max(y_coords) + 1
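        # i.e. the number of PE-array rows spanned by the routed ALUs/SEs (max 0-based row + 1)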
if "quit_minheight" in info.keys():
if info["quit_minheight"] is True:
input_count = len(set(nx.get_node_attributes(\
app.getInputSubGraph(), "input").keys()))
output_count = len(set(nx.get_node_attributes(\
app.getOutputSubGraph(), "output").keys()))
minh_op = math.ceil(len(app.getCompSubGraph().nodes()) \
/ width)
if CGRA.isIOShared():
min_maph = max(math.ceil((input_count + output_count) / 2),\
minh_op)
else:
min_maph = max(input_count, output_count, minh_op)
if min_maph == map_height and individual.isValid():
os.kill(main_pid, signal.SIGUSR1)
return map_height
@staticmethod
def isMinimize():
return True
@staticmethod
def name():
return "Mapping_Height"
|
[
"os.kill",
"os.getpid",
"math.ceil"
] |
[((199, 210), 'os.getpid', 'os.getpid', ([], {}), '()\n', (208, 210), False, 'import os\n'), ((2194, 2227), 'os.kill', 'os.kill', (['main_pid', 'signal.SIGUSR1'], {}), '(main_pid, signal.SIGUSR1)\n', (2201, 2227), False, 'import os\n'), ((1929, 1972), 'math.ceil', 'math.ceil', (['((input_count + output_count) / 2)'], {}), '((input_count + output_count) / 2)\n', (1938, 1972), False, 'import math\n')]
|
"""
Coauthors: <NAME>
<NAME>
"""
from toolbox import *
import argparse
import random
from sklearn.ensemble import RandomForestClassifier
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from sklearn.model_selection import ParameterSampler
from scipy.stats.distributions import expon
import json
def run_naive_rf():
naive_rf_kappa = []
naive_rf_ece = []
naive_rf_train_time = []
naive_rf_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (naive_rf)
for samples in samples_space:
RF = RandomForestClassifier(n_estimators=100, n_jobs=-1)
cohen_kappa, ece, train_time, test_time = run_rf_image_set(
RF,
cifar_train_images,
cifar_train_labels,
cifar_test_images,
cifar_test_labels,
samples,
classes,
)
naive_rf_kappa.append(cohen_kappa)
naive_rf_ece.append(ece)
naive_rf_train_time.append(train_time)
naive_rf_test_time.append(test_time)
print("naive_rf finished")
write_result(prefix + "naive_rf_kappa.txt", naive_rf_kappa)
write_result(prefix + "naive_rf_ece.txt", naive_rf_ece)
write_result(prefix + "naive_rf_train_time.txt", naive_rf_train_time)
write_result(prefix + "naive_rf_test_time.txt", naive_rf_test_time)
def run_cnn32():
cnn32_kappa = []
cnn32_ece = []
cnn32_train_time = []
cnn32_test_time = []
rng = np.random.RandomState(0)
param_grid = {'lr':[0.0001,0.001,0.0125,0.025],
'mo': [0.01,0.05,0.1,0.2,],
'bs': [32,64,128,256],
'wd': [0.00005,0.0001,0.0005,0.001,0.005]
}
param_list = list(ParameterSampler(param_grid, n_iter=20,
random_state=rng))
rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) for d in param_list]
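    # random search: the loops below evaluate every sampled configuration on each class
    # subset and record the one with the best mean validation accuracy per sample size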
outputlist=[]
total_train_time=0
for samples in samples_space:
totalaccuracy=[]
# cohen_kappa vs num training samples (cnn32)
for i in range(len(rounded_list)):
average_accuracy=0
for classes in classes_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32 = SimpleCNN32Filter(len(classes))
total_train_time=0
maxaccuracy=0
param=rounded_list[i]
lr=param['lr']
momentum=param['mo']
wd=param['wd']
batch=param['bs']
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
batch,
)
cohen_kappa, ece, train_time, test_time,accuracy = test_dn_image_es_multiple(
cnn32,
train_loader,
valid_loader,
valid_loader,
lr,
momentum,
wd,
)
total_train_time+=train_time
average_accuracy+=accuracy
average_accuracy=average_accuracy/len(classes_space)
totalaccuracy.append(average_accuracy)
yy=np.asarray(totalaccuracy)
z=np.argmax(yy)
classifier='CNN32'
num_classes=int(n_classes)
sample_size=int(samples)
outputdic=rounded_list[z].copy()
outputdic['classifier']=classifier
outputdic['number of classes']=num_classes
outputdic['sample size']=sample_size
outputlist.append(outputdic)
outputdic={}
with open("parameters.json", "w") as outfile:
for j in range(len(outputlist)):
json.dump(outputlist[j], outfile)
outfile.write("\n")
print("cnn32 finished")
write_result(prefix + "cnn32_kappa.txt", cnn32_kappa)
write_result(prefix + "cnn32_ece.txt", cnn32_ece)
write_result(prefix + "cnn32_train_time.txt", cnn32_train_time)
write_result(prefix + "cnn32_test_time.txt", cnn32_test_time)
def run_cnn32_2l():
cnn32_2l_kappa = []
cnn32_2l_ece = []
cnn32_2l_train_time = []
cnn32_2l_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32_2l)
for samples in samples_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32_2l = SimpleCNN32Filter2Layers(len(classes))
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_es(
cnn32_2l,
train_loader,
valid_loader,
test_loader,
)
cnn32_2l_kappa.append(cohen_kappa)
cnn32_2l_ece.append(ece)
cnn32_2l_train_time.append(train_time)
cnn32_2l_test_time.append(test_time)
print("cnn32_2l finished")
write_result(prefix + "cnn32_2l_kappa.txt", cnn32_2l_kappa)
write_result(prefix + "cnn32_2l_ece.txt", cnn32_2l_ece)
write_result(prefix + "cnn32_2l_train_time.txt", cnn32_2l_train_time)
write_result(prefix + "cnn32_2l_test_time.txt", cnn32_2l_test_time)
def run_cnn32_5l():
cnn32_5l_kappa = []
cnn32_5l_ece = []
cnn32_5l_train_time = []
cnn32_5l_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32_5l)
for samples in samples_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32_5l = SimpleCNN32Filter5Layers(len(classes))
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_es(
cnn32_5l,
train_loader,
valid_loader,
test_loader,
)
cnn32_5l_kappa.append(cohen_kappa)
cnn32_5l_ece.append(ece)
cnn32_5l_train_time.append(train_time)
cnn32_5l_test_time.append(test_time)
print("cnn32_5l finished")
write_result(prefix + "cnn32_5l_kappa.txt", cnn32_5l_kappa)
write_result(prefix + "cnn32_5l_ece.txt", cnn32_5l_ece)
write_result(prefix + "cnn32_5l_train_time.txt", cnn32_5l_train_time)
write_result(prefix + "cnn32_5l_test_time.txt", cnn32_5l_test_time)
def run_resnet18():
resnet18_kappa = []
resnet18_ece = []
resnet18_train_time = []
resnet18_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (resnet18)
for samples in samples_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
res = models.resnet18(pretrained=True)
num_ftrs = res.fc.in_features
res.fc = nn.Linear(num_ftrs, len(classes))
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_es(
res,
train_loader,
valid_loader,
test_loader,
)
resnet18_kappa.append(cohen_kappa)
resnet18_ece.append(ece)
resnet18_train_time.append(train_time)
resnet18_test_time.append(test_time)
print("resnet18 finished")
write_result(prefix + "resnet18_kappa.txt", resnet18_kappa)
write_result(prefix + "resnet18_ece.txt", resnet18_ece)
write_result(prefix + "resnet18_train_time.txt", resnet18_train_time)
write_result(prefix + "resnet18_test_time.txt", resnet18_test_time)
if __name__ == "__main__":
torch.multiprocessing.freeze_support()
# Example usage: python cifar_10.py -m 3
parser = argparse.ArgumentParser()
parser.add_argument("-m", help="class number")
args = parser.parse_args()
n_classes = int(args.m)
prefix = args.m + "_class/"
samples_space = np.geomspace(10, 10000, num=8, dtype=int)
nums = list(range(10))
random.shuffle(nums)
classes_space = list(combinations_45(nums, n_classes))
# normalize
scale = np.mean(np.arange(0, 256))
normalize = lambda x: (x - scale) / scale
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=None
)
cifar_train_images = normalize(cifar_trainset.data)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=None
)
cifar_test_images = normalize(cifar_testset.data)
cifar_test_labels = np.array(cifar_testset.targets)
cifar_train_images = cifar_train_images.reshape(-1, 32 * 32 * 3)
cifar_test_images = cifar_test_images.reshape(-1, 32 * 32 * 3)
#run_naive_rf()
data_transforms = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
run_cnn32()
run_cnn32_2l()
run_cnn32_5l()
data_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
run_resnet18()
|
[
"sklearn.ensemble.RandomForestClassifier",
"json.dump",
"torchvision.models.resnet18",
"argparse.ArgumentParser",
"random.shuffle",
"torchvision.datasets.CIFAR10",
"sklearn.model_selection.ParameterSampler",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] |
[((10251, 10276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10274, 10276), False, 'import argparse\n'), ((10513, 10533), 'random.shuffle', 'random.shuffle', (['nums'], {}), '(nums)\n', (10527, 10533), False, 'import random\n'), ((10734, 10804), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(True)', 'download': '(True)', 'transform': 'None'}), "(root='./', train=True, download=True, transform=None)\n", (10750, 10804), True, 'import torchvision.datasets as datasets\n'), ((10970, 11041), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(False)', 'download': '(True)', 'transform': 'None'}), "(root='./', train=False, download=True, transform=None)\n", (10986, 11041), True, 'import torchvision.datasets as datasets\n'), ((1848, 1905), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['param_grid'], {'n_iter': '(20)', 'random_state': 'rng'}), '(param_grid, n_iter=20, random_state=rng)\n', (1864, 1905), False, 'from sklearn.model_selection import ParameterSampler\n'), ((650, 701), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'n_jobs': '(-1)'}), '(n_estimators=100, n_jobs=-1)\n', (672, 701), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4488, 4521), 'json.dump', 'json.dump', (['outputlist[j]', 'outfile'], {}), '(outputlist[j], outfile)\n', (4497, 4521), False, 'import json\n'), ((5137, 5223), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(True)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=True, download=True, transform=\n data_transforms)\n", (5153, 5223), True, 'import torchvision.datasets as datasets\n'), ((5368, 5455), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(False)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=False, download=True, transform=\n data_transforms)\n", (5384, 5455), True, 'import torchvision.datasets as datasets\n'), ((6874, 6960), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(True)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=True, download=True, transform=\n data_transforms)\n", (6890, 6960), True, 'import torchvision.datasets as datasets\n'), ((7105, 7192), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(False)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=False, download=True, transform=\n data_transforms)\n", (7121, 7192), True, 'import torchvision.datasets as datasets\n'), ((8611, 8697), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(True)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=True, download=True, transform=\n data_transforms)\n", (8627, 8697), True, 'import torchvision.datasets as datasets\n'), ((8842, 8929), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(False)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=False, download=True, transform=\n data_transforms)\n", (8858, 8929), True, 'import torchvision.datasets as datasets\n'), ((9038, 9070), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9053, 9070), True, 'import torchvision.models as models\n'), ((11376, 11397), 
'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11395, 11397), True, 'import torchvision.transforms as transforms\n'), ((11399, 11453), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (11419, 11453), True, 'import torchvision.transforms as transforms\n'), ((11581, 11602), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11600, 11602), True, 'import torchvision.transforms as transforms\n'), ((11616, 11682), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (11636, 11682), True, 'import torchvision.transforms as transforms\n'), ((2394, 2480), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(True)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=True, download=True, transform=\n data_transforms)\n", (2410, 2480), True, 'import torchvision.datasets as datasets\n'), ((2645, 2732), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""./"""', 'train': '(False)', 'download': '(True)', 'transform': 'data_transforms'}), "(root='./', train=False, download=True, transform=\n data_transforms)\n", (2661, 2732), True, 'import torchvision.datasets as datasets\n')]
|
from typing import Dict
from pathlib import Path
from discord.ext import commands
from ..config import MAIN_DB
from .db import DatabaseConnection
# Maybe this is a little clumsy?
_CONNECTIONS: Dict[str, DatabaseConnection] = {}
def add_db(path: str, bot: commands.Bot) -> DatabaseConnection:
if path not in _CONNECTIONS:
_CONNECTIONS[path] = DatabaseConnection(path, bot)
return _CONNECTIONS[path]
def get_db(path: str=MAIN_DB) -> DatabaseConnection:
    return _CONNECTIONS[path]
def init_db(path: str, bot: commands.Bot):
p = Path(MAIN_DB)
# Create db if it doesn't exist
if not p.exists():
# NOTE: assumes the database file resides in a subdirectory
# within the project root
#
# TODO: Actually make this not completely explode if the db file resides in
# the root directory.
p.parent.mkdir(parents=True, exist_ok=True)
p.touch()
# Connect to DB
db = add_db(path, bot)
# Add tables (if not already exists)
with open("db/vjemmie.db.sql", "r") as f:
script = f.read()
db.cursor.executescript(script)
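# --- Usage sketch (illustrative only; assumes a configured commands.Bot instance named `bot`) ---
# On startup, create the database file and schema, then reuse the cached
# connection anywhere else in the code base:
#
#   init_db(MAIN_DB, bot)
#   db = get_db()          # returns the cached DatabaseConnection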
|
[
"pathlib.Path"
] |
[((560, 573), 'pathlib.Path', 'Path', (['MAIN_DB'], {}), '(MAIN_DB)\n', (564, 573), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python3
import sys
import graphyte
import requests
def get_orgs():
with requests.get("https://codein.withgoogle.com/api/program/current/organization/") as resp:
if resp.status_code != 200:
print(f"Received status code {resp.status_code}: {resp.text}")
exit(1)
return resp.json()["results"]
def report_graphite(orgs):
for org in orgs:
name_base = f"gci.{org['program_year']}.{org['slug']}"
count = org["completed_task_instance_count"]
graphyte.send(f"{name_base}.tasks_completed", count)
def report_console(orgs):
counts = ((org["name"], org["completed_task_instance_count"]) for org in orgs)
# Sort and print by descending order of tasks completed
counts = sorted(counts, key=lambda x: x[1], reverse=True)
for org, count in counts:
print(f"{org}: {count}")
def main():
orgs = get_orgs()
report_console(orgs)
if len(sys.argv) > 1:
graphyte.init(sys.argv[1])
report_graphite(orgs)
if __name__ == '__main__':
main()
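# Example invocations (the script name is a placeholder, not from the original repo):
#   python gci_orgs.py                       # print per-org task counts to the console
#   python gci_orgs.py graphite.example.org  # also push metrics to a Graphite host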
|
[
"graphyte.send",
"graphyte.init",
"requests.get"
] |
[((94, 173), 'requests.get', 'requests.get', (['"""https://codein.withgoogle.com/api/program/current/organization/"""'], {}), "('https://codein.withgoogle.com/api/program/current/organization/')\n", (106, 173), False, 'import requests\n'), ((527, 579), 'graphyte.send', 'graphyte.send', (['f"""{name_base}.tasks_completed"""', 'count'], {}), "(f'{name_base}.tasks_completed', count)\n", (540, 579), False, 'import graphyte\n'), ((971, 997), 'graphyte.init', 'graphyte.init', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (984, 997), False, 'import graphyte\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.cluster import AgglomerativeClustering
# # Organizing clusters as a hierarchical tree
# ## Grouping clusters in bottom-up fashion
np.random.seed(123)
variables = ['X', 'Y', 'Z']
labels = ['ID_0', 'ID_1', 'ID_2', 'ID_3', 'ID_4']
X = np.random.random_sample([5, 3])*10
df = pd.DataFrame(X, columns=variables, index=labels)
print(df)
# ## Performing hierarchical clustering on a distance matrix
row_dist = pd.DataFrame(squareform(pdist(df, metric='euclidean')),
columns=labels,
index=labels)
print(row_dist)
# We can either pass a condensed distance matrix (upper triangular) from the `pdist` function, or we can pass the "original" data array and define the `metric='euclidean'` argument in `linkage`. However, we should not pass the squareform distance matrix, which would yield different distance values although the overall clustering could be the same.
# 1. incorrect approach: Squareform distance matrix
#row_clusters = linkage(row_dist, method='complete', metric='euclidean')
#pd.DataFrame(row_clusters,
# columns=['row label 1', 'row label 2',
# 'distance', 'no. of items in clust.'],
# index=['cluster %d' % (i + 1)
# for i in range(row_clusters.shape[0])])
# 2. correct approach: Condensed distance matrix
row_clusters = linkage(pdist(df, metric='euclidean'), method='complete')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2',
'distance', 'no. of items in clust.'],
index=['cluster %d' % (i + 1)
for i in range(row_clusters.shape[0])])
# 3. correct approach: Input matrix
row_clusters = linkage(df.values, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2',
'distance', 'no. of items in clust.'],
index=['cluster %d' % (i + 1)
for i in range(row_clusters.shape[0])])
# make dendrogram
row_dendr = dendrogram(row_clusters,
labels=labels,
color_threshold=np.inf
)
plt.tight_layout()
plt.ylabel('Euclidean distance')
plt.show()
# ## Attaching dendrograms to a heat map
# plot row dendrogram
fig = plt.figure(figsize=(8, 8), facecolor='white')
axd = fig.add_axes([0.09, 0.1, 0.2, 0.6])
# note: for matplotlib < v1.5.1, please use orientation='right'
row_dendr = dendrogram(row_clusters, orientation='left')
# reorder data with respect to clustering
df_rowclust = df.iloc[row_dendr['leaves'][::-1]]
axd.set_xticks([])
axd.set_yticks([])
# remove axes spines from dendrogram
for i in axd.spines.values():
i.set_visible(False)
# plot heatmap
axm = fig.add_axes([0.23, 0.1, 0.6, 0.6]) # x-pos, y-pos, width, height
cax = axm.matshow(df_rowclust, interpolation='nearest', cmap='hot_r')
fig.colorbar(cax)
axm.set_xticklabels([''] + list(df_rowclust.columns))
axm.set_yticklabels([''] + list(df_rowclust.index))
plt.show()
# ## Applying agglomerative clustering via scikit-learn
ac = AgglomerativeClustering(n_clusters=3,
affinity='euclidean',
linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
ac = AgglomerativeClustering(n_clusters=2,
affinity='euclidean',
linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
|
[
"pandas.DataFrame",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.random.random_sample",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.figure",
"sklearn.cluster.AgglomerativeClustering",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.tight_layout",
"scipy.cluster.hierarchy.dendrogram"
] |
[((326, 345), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (340, 345), True, 'import numpy as np\n'), ((470, 518), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'variables', 'index': 'labels'}), '(X, columns=variables, index=labels)\n', (482, 518), True, 'import pandas as pd\n'), ((1898, 1955), 'scipy.cluster.hierarchy.linkage', 'linkage', (['df.values'], {'method': '"""complete"""', 'metric': '"""euclidean"""'}), "(df.values, method='complete', metric='euclidean')\n", (1905, 1955), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((2231, 2294), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['row_clusters'], {'labels': 'labels', 'color_threshold': 'np.inf'}), '(row_clusters, labels=labels, color_threshold=np.inf)\n', (2241, 2294), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((2366, 2384), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2382, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2417), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Euclidean distance"""'], {}), "('Euclidean distance')\n", (2395, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2418, 2428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2426, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)', 'facecolor': '"""white"""'}), "(figsize=(8, 8), facecolor='white')\n", (2510, 2545), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2709), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['row_clusters'], {'orientation': '"""left"""'}), "(row_clusters, orientation='left')\n", (2675, 2709), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((3217, 3227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3225, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3369), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(3)', 'affinity': '"""euclidean"""', 'linkage': '"""complete"""'}), "(n_clusters=3, affinity='euclidean', linkage='complete')\n", (3313, 3369), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((3500, 3579), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(2)', 'affinity': '"""euclidean"""', 'linkage': '"""complete"""'}), "(n_clusters=2, affinity='euclidean', linkage='complete')\n", (3523, 3579), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((430, 461), 'numpy.random.random_sample', 'np.random.random_sample', (['[5, 3]'], {}), '([5, 3])\n', (453, 461), True, 'import numpy as np\n'), ((1552, 1581), 'scipy.spatial.distance.pdist', 'pdist', (['df'], {'metric': '"""euclidean"""'}), "(df, metric='euclidean')\n", (1557, 1581), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((627, 656), 'scipy.spatial.distance.pdist', 'pdist', (['df'], {'metric': '"""euclidean"""'}), "(df, metric='euclidean')\n", (632, 656), False, 'from scipy.spatial.distance import pdist, squareform\n')]
|
# coding=UTF-8
# ex:ts=4:sw=4:et
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
from mvc.support.utils import get_new_uuid
__all__ = [
"gobject",
"GtkTreeIter",
"GenericTreeModel"
"TREE_MODEL_LIST_ONLY"
]
TREE_MODEL_LIST_ONLY = 0x00
TREE_MODEL_ITERS_PERSIST = 0x00
events_pending = lambda: False
class GtkTreeIter():
def __init__(self, user_data, path=None):
self.user_data = user_data
self.path = path
pass # end of class
class GenericTreeModel(object):
__connected_signals__ = None
def __init__(self):
self.__connected_signals__ = {}
def connect(self, signal_name, handler, *args):
handlers = self.__connected_signals__.get(signal_name, {})
handler_id = get_new_uuid()
handlers[handler_id] = (handler, args)
self.__connected_signals__[signal_name] = handlers
return handler_id
def disconnect(self, signal_name, handler_id):
try:
handlers = self.__connected_signals__.get(signal_name, {})
del handlers[handler_id]
except KeyError:
pass
return
def emit(self, signal_name, args=()):
handlers = self.__connected_signals__.get(signal_name, {})
for id, (handler, user_args) in handlers.items(): # @ReservedAssignment
handler(self, *((args,) + user_args))
pass
def set_property(self, *args, **kwargs):
pass
def create_tree_iter(self, user_data):
return GtkTreeIter(user_data)
def get_path(self, itr):
return self.on_get_path(itr.user_data)
def get_iter(self, path):
return self.create_tree_iter(self.on_get_iter(path))
def row_inserted(self, path, itr):
self.emit("row-inserted", (path, itr))
def row_deleted(self, indeces):
self.emit("row-deleted", (indeces,))
def invalidate_iters(self):
        pass # TODO!
def iter_is_valid(self, itr):
return True # TODO!
def __len__(self):
return len(self._model_data)
pass # end of class
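# --- Minimal usage sketch (illustrative; assumes the mvc package is importable) ---
# Demonstrates the connect/emit plumbing implemented above.
if __name__ == '__main__':
    model = GenericTreeModel()

    def on_row_inserted(mdl, args):
        # emit() passes the (path, itr) tuple as a single argument
        path, itr = args
        print("row inserted at", path)

    handler_id = model.connect("row-inserted", on_row_inserted)
    model.row_inserted((0,), model.create_tree_iter(object()))
    model.disconnect("row-inserted", handler_id)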
|
[
"mvc.support.utils.get_new_uuid"
] |
[((804, 818), 'mvc.support.utils.get_new_uuid', 'get_new_uuid', ([], {}), '()\n', (816, 818), False, 'from mvc.support.utils import get_new_uuid\n')]
|
# Random Pick with Weight: https://leetcode.com/problems/random-pick-with-weight/
# You are given an array of positive integers w where w[i] describes the weight of ith index (0-indexed).
# We need to call the function pickIndex() which randomly returns an integer in the range [0, w.length - 1]. pickIndex() should return the integer proportional to its weight in the w array. For example, for w = [1, 3], the probability of picking the index 0 is 1 / (1 + 3) = 0.25 (i.e 25%) while the probability of picking the index 1 is 3 / (1 + 3) = 0.75 (i.e 75%).
# More formally, the probability of picking index i is w[i] / sum(w).
# This problem is actually quite easy: we keep a rolling total (prefix sums) and then go across
# from left to right until we pass the randomly chosen value, returning that index
import random
class Solution:
def __init__(self, w):
self.curSum = 0
self.values = []
for weight in w:
self.curSum += weight
self.values.append(self.curSum)
def pickIndex(self) -> int:
if len(self.values) <= 1:
return 0
        weightedPick = random.random() * self.curSum
for i in range(len(self.values)):
if self.values[i] > weightedPick:
return i
    # Now the above runs in O(N) per pick, but each pick can be done in O(log N), since it is really easy
    # to binary search through sorted numbers like the weighted prefix sums (based off of the cumulative sum)
def pickIndex(self) -> int:
if len(self.values) <= 1:
return 0
        # Create a random number in [0, curSum): random.random() gives a value in [0, 1), so multiply it by curSum
        ourPick = random.random() * self.curSum
lo, hi = 0, len(self.values) - 1
while lo < hi:
mid = lo + (hi - lo) // 2
if ourPick > self.values[mid]:
lo = mid + 1
else:
hi = mid
return lo
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
# Score Card
# Did I need hints? N
# Did you finish within 30 min? 15
# Was the solution optimal? This is optimal
# Were there any bugs? No
# 5 5 5 5 = 5
|
[
"random"
] |
[((1099, 1107), 'random', 'random', ([], {}), '()\n', (1105, 1107), False, 'import random\n'), ((1639, 1647), 'random', 'random', ([], {}), '()\n', (1645, 1647), False, 'import random\n')]
|
from django.forms import BooleanField, ModelForm
from tree.forms import TreeChoiceField
from .models import DossierDEvenements
class DossierDEvenementsForm(ModelForm):
statique = BooleanField(required=False)
class Meta(object):
model = DossierDEvenements
exclude = ()
field_classes = {
'parent': TreeChoiceField,
}
class Media(object):
css = {
'all': ('css/custom_admin.css',),
}
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance')
if instance is not None:
initial = kwargs.get('initial', {})
initial['statique'] = instance.evenements.exists()
kwargs['initial'] = initial
super(DossierDEvenementsForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(DossierDEvenementsForm, self).clean()
if cleaned_data['categorie'] is not None \
and cleaned_data['parent'] is not None:
msg = 'Ne pas saisir de catégorie si le dossier a un parent.'
self.add_error('categorie', msg)
self.add_error('parent', msg)
evenements = cleaned_data.get('evenements')
if cleaned_data['statique']:
if not evenements:
cleaned_data['evenements'] = \
self.instance.get_queryset(dynamic=True)
self.instance.evenements.add(*evenements)
else:
cleaned_data['evenements'] = []
if self.instance.pk is not None:
self.instance.evenements.clear()
return cleaned_data
|
[
"django.forms.BooleanField"
] |
[((186, 214), 'django.forms.BooleanField', 'BooleanField', ([], {'required': '(False)'}), '(required=False)\n', (198, 214), False, 'from django.forms import BooleanField, ModelForm\n')]
|
import random
import time
n = 10000000
random_list = random.sample(range(n * 10), n)
start = time.time()
median = sorted(random_list, reverse=True)[n // 2]
end = time.time()
print(median)
print(end - start)
|
[
"time.time"
] |
[((95, 106), 'time.time', 'time.time', ([], {}), '()\n', (104, 106), False, 'import time\n'), ((164, 175), 'time.time', 'time.time', ([], {}), '()\n', (173, 175), False, 'import time\n')]
|
#!/usr/bin/env python3
import pandas as pd
dataset = pd.read_csv('Dataset.csv')
dataset.to_csv('Dataset.csv', index=False)
|
[
"pandas.read_csv"
] |
[((55, 81), 'pandas.read_csv', 'pd.read_csv', (['"""Dataset.csv"""'], {}), "('Dataset.csv')\n", (66, 81), True, 'import pandas as pd\n')]
|
import scrapy
class CompaniesSpider(scrapy.Spider):
"""This spider wil crawl all the company link available in itviec and save it
to a json line file.
"""
name = "companies"
start_urls = [
'https://itviec.com/companies',
]
def parse(self, response):
all_companies = response.xpath("//div[@class='first-group companies']/a[@class='featured-company']/@href").getall()
for company_link in all_companies:
            relative_link = '/'.join(company_link.split('/')[:-1])
            company_name = company_link.split('/')[-2]
absolute_link = response.urljoin(relative_link)
yield {'company_name': company_name, 'url': absolute_link }
next_page = response.xpath("//a[@class='more-jobs-link more-company']/@href").get()
# next_page now has the form of '/companies?page=2' or None
if next_page is not None:
# makes absolute url
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback = self.parse)
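# To run the spider from inside a Scrapy project and export the company links,
# something like the following should work (exact flags depend on the Scrapy version):
#   scrapy crawl companies -o companies.jl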
|
[
"scrapy.Request"
] |
[((1037, 1083), 'scrapy.Request', 'scrapy.Request', (['next_page'], {'callback': 'self.parse'}), '(next_page, callback=self.parse)\n', (1051, 1083), False, 'import scrapy\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 15:19:55 2020
@author: mi19356
"""
import numpy as np
import os
import pandas as pd
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
import random
import math
from scrape import vtk_scrap
from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream,sphericaldiam,printtofiletext
#scrape data
#orien,vtkdata,vtkdataPoints,const=vtk_scrap('PF_00189000','graindata')
dream=0
if dream==1:
orien,vtkdata,vtkdataPoints,const=vtk_scrap('PF_00130000','graindata',dream)
grainids=data_reconstruct(vtkdata, vtkdataPoints,1,orien)
else:
orien,vtkdata,const=vtk_scrap('vtkupdate','graindata',dream)
grainids,diameter=data_reconstruct_dream(vtkdata,orien)
#construct a vtk file
#vtkdatareso=reso_change(vtkdata)
"""
Create orientation matrix
"""
def rotation_info(orien,grainids):
#Defining local variables
vec1=[0,0,1]
vec2=[0,1,0]
#modify the orientations
orien=orien[1:,1:]
#check to see if there are missing orientations
if len(orien)<len(grainids):
totaldif=len(grainids)-len(orien)
for i in range(0,int(totaldif)):
orien=np.append(orien,[random.uniform(0,2*math.pi),random.uniform(0,2*math.pi),random.uniform(0,2*math.pi)])
orien=orien.reshape(int(len(orien)/3),3)
    #construct rotation matrix
zrot=np.array([[np.cos((orien[:,0])),np.sin((orien[:,0])),np.zeros(len(orien))],[-np.sin((orien[:,0])),np.cos((orien[:,0])),np.zeros(len(orien))],[np.zeros(len(orien)),np.zeros(len(orien)),np.ones(len(orien))]])
xrot=np.array([[np.ones(len(orien)),np.zeros(len(orien)),np.zeros(len(orien))],[np.zeros(len(orien)),np.cos((orien[:,1])),np.sin((orien[:,1]))],[np.zeros(len(orien)),-np.sin((orien[:,1])),np.cos((orien[:,1]))]])
zrot2=np.array([[np.cos((orien[:,2])),np.sin((orien[:,2])),np.zeros(len(orien))],[-np.sin((orien[:,2])),np.cos((orien[:,2])),np.zeros(len(orien))],[np.zeros(len(orien)),np.zeros(len(orien)),np.ones(len(orien))]])
total_rot=[[]*len(orien)]*len(orien)
samp1=[[]*len(orien)]*len(orien)
samp2=[[]*len(orien)]*len(orien)
for i in range(0,len(orien)):
total_rot[i]=np.transpose(np.dot(np.dot(zrot2[:,:,i],xrot[:,:,i]),zrot[:,:,i]))
samp1[i]=np.dot(total_rot[i],vec1)
samp2[i]=np.dot(total_rot[i],vec2)
return vec1, vec2, samp1, samp2, total_rot, orien
"""
create material file for AMITEX
"""
def mat_create(orien,const, diameter,statev):
#rotating vectors using grain orientations
vec1,vec2,samp1,samp2,total_rot, orien=rotation_info(orien,grainids)
#use the diameter to create a variable parameter for \tau
    #diameter currently in microns, convert to mm
#need to add 17.9 and 10 to excel const file.
diameter=(2*diameter)/1000
#writing diameters to file
printtofiletext(diameter,'diameters')
#writing orientations to file
orienprint=list(orien)
printtofiletext(orienprint,'orientations')
taud=220 + (17.9/((diameter)**0.5))
    #check to make sure there are no excessively large values (disabled below)
#checkgreater=np.where(taud>350)[0]
#replace these values
#taud[checkgreater]=340.0
Materials = Element('Materials')
comment = Comment('REFERENCE MATERIAL')
Materials.append(comment)
child = SubElement(Materials, 'Reference_Material',Lambda0= '2.0431e+5', Mu0='0.8756e+5' )
comment = Comment('MATERIAL 1')
Materials.append(comment)
"orientation files required if material zone technique is used in AMITEX"
fsamp1 = open('fsam1.txt', 'w')
fsamp2 = open('fsam2.txt', 'w')
fsamp3 = open('fsam3.txt', 'w')
fsamp21 = open('fsam21.txt', 'w')
fsamp22 = open('fsam22.txt', 'w')
fsamp23 = open('fsam23.txt', 'w')
orien1 = open('orien1.txt', 'w')
orien2 = open('orien2.txt', 'w')
orien3 = open('orien3.txt', 'w')
tau01 = open('tau1.txt', 'w')
tau02 = open('tau2.txt', 'w')
for numMat in range(1,len(orien)+1):
for i in range(0,(len(const))):
if i==59:
const[i,0]=samp1[numMat-1][0]
fsamp1.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==60:
const[i,0]=samp1[numMat-1][1]
fsamp2.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==61:
const[i,0]=samp1[numMat-1][2]
fsamp3.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==67:
const[i,0]=samp2[numMat-1][0]
fsamp21.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==68:
const[i,0]=samp2[numMat-1][1]
fsamp22.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==69:
const[i,0]=samp2[numMat-1][2]
fsamp23.write(str("{:.16f}".format(const[i,0]))+'\n')
#adjust const array to include grain dependent info
#grain orientations
#update the value for tau0
elif i==98:
const[i,0]=taud[numMat-1]
tau01.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==114:
const[i,0]=taud[numMat-1]
tau02.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==168:
const[i,0]=(orien[numMat-1,0])
orien1.write(str(const[i,0])+'\n')
elif i==169:
const[i,0]=(orien[numMat-1,1])
orien2.write(str(const[i,0])+'\n')
elif i==170:
const[i,0]=(orien[numMat-1,2])
orien3.write(str(const[i,0])+'\n')
fsamp1.close()
fsamp2.close()
fsamp3.close()
fsamp21.close()
fsamp22.close()
fsamp23.close()
orien1.close()
orien2.close()
orien3.close()
child_grain=SubElement(Materials, 'Material', numM="1",Lib='/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so', Law='UMATBCCGDGS')
"This stores all the parameters required for the material"
"Coeff is the element of the grain material, and the atrributes are the parameter values"
"iterate across the different material constants to create subelelements for each constant2"
for i in range(0,(len(const))):
if i==59:
const[i,0]=samp1[numMat-1][0]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam1.txt")
elif i==60:
const[i,0]=samp1[numMat-1][1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam2.txt")
elif i==61:
const[i,0]=samp1[numMat-1][2]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam3.txt")
elif i==67:
const[i,0]=samp2[numMat-1][0]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam21.txt")
elif i==68:
const[i,0]=samp2[numMat-1][1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam22.txt")
elif i==69:
const[i,0]=samp2[numMat-1][2]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam23.txt")
elif i==98:
const[i,0]=taud[numMat-1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/tau1.txt")
elif i==114:
const[i,0]=taud[numMat-1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/tau2.txt")
elif i==168:
const[i,0]=(orien[numMat-1,0])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien1.txt")
elif i==169:
const[i,0]=(orien[numMat-1,1])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien2.txt")
elif i==170:
const[i,0]=(orien[numMat-1,2])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien3.txt")
else:
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant',Value=str(const[i,0]))
    #iterate across the required number of state variables needed
for i in range(0,statev):
child_grain_tail = SubElement(child_grain, 'IntVar',Index=str(i+1), Type='Constant',Value='0.')
tree = ElementTree(Materials)
tree.write("fatemptzone2.xml")
mat_create(orien,const,diameter,900)
|
[
"dataconversions.data_reconstruct_dream",
"random.uniform",
"dataconversions.printtofiletext",
"xml.etree.ElementTree.Element",
"dataconversions.data_reconstruct",
"xml.etree.ElementTree.Comment",
"numpy.sin",
"numpy.cos",
"xml.etree.ElementTree.SubElement",
"numpy.dot",
"scrape.vtk_scrap",
"xml.etree.ElementTree.ElementTree"
] |
[((547, 591), 'scrape.vtk_scrap', 'vtk_scrap', (['"""PF_00130000"""', '"""graindata"""', 'dream'], {}), "('PF_00130000', 'graindata', dream)\n", (556, 591), False, 'from scrape import vtk_scrap\n'), ((605, 655), 'dataconversions.data_reconstruct', 'data_reconstruct', (['vtkdata', 'vtkdataPoints', '(1)', 'orien'], {}), '(vtkdata, vtkdataPoints, 1, orien)\n', (621, 655), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((686, 728), 'scrape.vtk_scrap', 'vtk_scrap', (['"""vtkupdate"""', '"""graindata"""', 'dream'], {}), "('vtkupdate', 'graindata', dream)\n", (695, 728), False, 'from scrape import vtk_scrap\n'), ((750, 788), 'dataconversions.data_reconstruct_dream', 'data_reconstruct_dream', (['vtkdata', 'orien'], {}), '(vtkdata, orien)\n', (772, 788), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((3067, 3105), 'dataconversions.printtofiletext', 'printtofiletext', (['diameter', '"""diameters"""'], {}), "(diameter, 'diameters')\n", (3082, 3105), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((3179, 3222), 'dataconversions.printtofiletext', 'printtofiletext', (['orienprint', '"""orientations"""'], {}), "(orienprint, 'orientations')\n", (3194, 3222), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((3438, 3458), 'xml.etree.ElementTree.Element', 'Element', (['"""Materials"""'], {}), "('Materials')\n", (3445, 3458), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((3474, 3503), 'xml.etree.ElementTree.Comment', 'Comment', (['"""REFERENCE MATERIAL"""'], {}), "('REFERENCE MATERIAL')\n", (3481, 3503), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((3550, 3636), 'xml.etree.ElementTree.SubElement', 'SubElement', (['Materials', '"""Reference_Material"""'], {'Lambda0': '"""2.0431e+5"""', 'Mu0': '"""0.8756e+5"""'}), "(Materials, 'Reference_Material', Lambda0='2.0431e+5', Mu0=\n '0.8756e+5')\n", (3560, 3636), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((3650, 3671), 'xml.etree.ElementTree.Comment', 'Comment', (['"""MATERIAL 1"""'], {}), "('MATERIAL 1')\n", (3657, 3671), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((6156, 6313), 'xml.etree.ElementTree.SubElement', 'SubElement', (['Materials', '"""Material"""'], {'numM': '"""1"""', 'Lib': '"""/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so"""', 'Law': '"""UMATBCCGDGS"""'}), "(Materials, 'Material', numM='1', Lib=\n '/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so'\n , Law='UMATBCCGDGS')\n", (6166, 6313), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((9162, 9184), 'xml.etree.ElementTree.ElementTree', 'ElementTree', (['Materials'], {}), '(Materials)\n', (9173, 9184), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((2436, 2462), 'numpy.dot', 'np.dot', (['total_rot[i]', 'vec1'], {}), '(total_rot[i], vec1)\n', (2442, 2462), True, 'import numpy as np\n'), ((2481, 2507), 'numpy.dot', 'np.dot', (['total_rot[i]', 'vec2'], {}), '(total_rot[i], vec2)\n', (2487, 2507), True, 'import numpy as np\n'), ((1515, 1534), 'numpy.cos', 'np.cos', 
(['orien[:, 0]'], {}), '(orien[:, 0])\n', (1521, 1534), True, 'import numpy as np\n'), ((1536, 1555), 'numpy.sin', 'np.sin', (['orien[:, 0]'], {}), '(orien[:, 0])\n', (1542, 1555), True, 'import numpy as np\n'), ((1602, 1621), 'numpy.cos', 'np.cos', (['orien[:, 0]'], {}), '(orien[:, 0])\n', (1608, 1621), True, 'import numpy as np\n'), ((1818, 1837), 'numpy.cos', 'np.cos', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1824, 1837), True, 'import numpy as np\n'), ((1839, 1858), 'numpy.sin', 'np.sin', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1845, 1858), True, 'import numpy as np\n'), ((1905, 1924), 'numpy.cos', 'np.cos', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1911, 1924), True, 'import numpy as np\n'), ((1952, 1971), 'numpy.cos', 'np.cos', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (1958, 1971), True, 'import numpy as np\n'), ((1973, 1992), 'numpy.sin', 'np.sin', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (1979, 1992), True, 'import numpy as np\n'), ((2039, 2058), 'numpy.cos', 'np.cos', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (2045, 2058), True, 'import numpy as np\n'), ((2370, 2407), 'numpy.dot', 'np.dot', (['zrot2[:, :, i]', 'xrot[:, :, i]'], {}), '(zrot2[:, :, i], xrot[:, :, i])\n', (2376, 2407), True, 'import numpy as np\n'), ((1315, 1345), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1329, 1345), False, 'import random\n'), ((1343, 1373), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1357, 1373), False, 'import random\n'), ((1371, 1401), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1385, 1401), False, 'import random\n'), ((1581, 1600), 'numpy.sin', 'np.sin', (['orien[:, 0]'], {}), '(orien[:, 0])\n', (1587, 1600), True, 'import numpy as np\n'), ((1884, 1903), 'numpy.sin', 'np.sin', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1890, 1903), True, 'import numpy as np\n'), ((2018, 2037), 'numpy.sin', 'np.sin', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (2024, 2037), True, 'import numpy as np\n')]
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import render
from django.http import Http404
# Create your views here.
def index(request):
return render(request=request, template_name='homepage.html')
|
[
"django.shortcuts.render"
] |
[((255, 309), 'django.shortcuts.render', 'render', ([], {'request': 'request', 'template_name': '"""homepage.html"""'}), "(request=request, template_name='homepage.html')\n", (261, 309), False, 'from django.shortcuts import render\n')]
|
#!/usr/bin/env python
"""Tests for `acdh_geonames_utils` package."""
import os
import unittest
from click.testing import CliRunner
from acdh_geonames_utils import acdh_geonames_utils as gn
from acdh_geonames_utils import cli
good_country_code = 'YU'
bad_country_code = 'BAAAD'
good_ft_code = "en"
bad_ft_code = "de"
TEST_GN_FILE = os.path.join(
"./fixtures",
"AL.txt"
)
class TestAcdh_geonames_utils(unittest.TestCase):
"""Tests for `acdh_geonames_utils` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_001_download(self):
"""Test download of zip."""
good = gn.download_country_zip(good_country_code)
bad = gn.download_country_zip(bad_country_code)
self.assertTrue(good.endswith(f"{good_country_code}.zip"))
self.assertEqual(bad, "")
def test_002_download_and_unzip(self):
"""Test download and unzip."""
good = gn.download_and_unzip_country_zip(good_country_code)
bad = gn.download_and_unzip_country_zip(bad_country_code)
self.assertTrue(good.endswith(f"{good_country_code}.txt"))
self.assertEqual(bad, "")
def test_003_unzip(self):
"""Test unzipping of zip."""
bad = gn.unzip_country_zip("")
self.assertEqual(bad, "")
def test_004_file_to_df(self):
"""Test loading file into pandas.DataFrame"""
df = gn.countries_as_df(TEST_GN_FILE)
self.assertEqual(len(df), 9356)
def test_005_dl_to_df(self):
"""Test loading download into pandas.DataFrame"""
good_df = gn.download_to_df('YU')
bad_df = gn.download_to_df('YUUU')
self.assertEqual(len(good_df), 1)
self.assertFalse(bad_df)
def test_006_dl_ft(self):
good = gn.dl_feature_codes(good_ft_code)
bad = gn.dl_feature_codes(bad_ft_code)
self.assertTrue(good != "")
self.assertTrue(bad == "")
def test_007_dl_ft_as_df(self):
good = gn.feature_codes_df(good_ft_code)
bad = gn.feature_codes_df(bad_ft_code)
self.assertIsNotNone(good)
self.assertIsNone(bad)
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'acdh_geonames_utils.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
|
[
"acdh_geonames_utils.acdh_geonames_utils.download_country_zip",
"acdh_geonames_utils.acdh_geonames_utils.download_and_unzip_country_zip",
"acdh_geonames_utils.acdh_geonames_utils.dl_feature_codes",
"acdh_geonames_utils.acdh_geonames_utils.countries_as_df",
"acdh_geonames_utils.acdh_geonames_utils.download_to_df",
"click.testing.CliRunner",
"os.path.join",
"acdh_geonames_utils.acdh_geonames_utils.feature_codes_df",
"acdh_geonames_utils.acdh_geonames_utils.unzip_country_zip"
] |
[((337, 373), 'os.path.join', 'os.path.join', (['"""./fixtures"""', '"""AL.txt"""'], {}), "('./fixtures', 'AL.txt')\n", (349, 373), False, 'import os\n'), ((710, 752), 'acdh_geonames_utils.acdh_geonames_utils.download_country_zip', 'gn.download_country_zip', (['good_country_code'], {}), '(good_country_code)\n', (733, 752), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((767, 808), 'acdh_geonames_utils.acdh_geonames_utils.download_country_zip', 'gn.download_country_zip', (['bad_country_code'], {}), '(bad_country_code)\n', (790, 808), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1008, 1060), 'acdh_geonames_utils.acdh_geonames_utils.download_and_unzip_country_zip', 'gn.download_and_unzip_country_zip', (['good_country_code'], {}), '(good_country_code)\n', (1041, 1060), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1075, 1126), 'acdh_geonames_utils.acdh_geonames_utils.download_and_unzip_country_zip', 'gn.download_and_unzip_country_zip', (['bad_country_code'], {}), '(bad_country_code)\n', (1108, 1126), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1310, 1334), 'acdh_geonames_utils.acdh_geonames_utils.unzip_country_zip', 'gn.unzip_country_zip', (['""""""'], {}), "('')\n", (1330, 1334), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1472, 1504), 'acdh_geonames_utils.acdh_geonames_utils.countries_as_df', 'gn.countries_as_df', (['TEST_GN_FILE'], {}), '(TEST_GN_FILE)\n', (1490, 1504), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1655, 1678), 'acdh_geonames_utils.acdh_geonames_utils.download_to_df', 'gn.download_to_df', (['"""YU"""'], {}), "('YU')\n", (1672, 1678), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1696, 1721), 'acdh_geonames_utils.acdh_geonames_utils.download_to_df', 'gn.download_to_df', (['"""YUUU"""'], {}), "('YUUU')\n", (1713, 1721), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1843, 1876), 'acdh_geonames_utils.acdh_geonames_utils.dl_feature_codes', 'gn.dl_feature_codes', (['good_ft_code'], {}), '(good_ft_code)\n', (1862, 1876), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((1891, 1923), 'acdh_geonames_utils.acdh_geonames_utils.dl_feature_codes', 'gn.dl_feature_codes', (['bad_ft_code'], {}), '(bad_ft_code)\n', (1910, 1923), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((2047, 2080), 'acdh_geonames_utils.acdh_geonames_utils.feature_codes_df', 'gn.feature_codes_df', (['good_ft_code'], {}), '(good_ft_code)\n', (2066, 2080), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((2095, 2127), 'acdh_geonames_utils.acdh_geonames_utils.feature_codes_df', 'gn.feature_codes_df', (['bad_ft_code'], {}), '(bad_ft_code)\n', (2114, 2127), True, 'from acdh_geonames_utils import acdh_geonames_utils as gn\n'), ((2283, 2294), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2292, 2294), False, 'from click.testing import CliRunner\n')]
|
# Generated by Django 3.2.1 on 2021-08-08 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('admin_panel', '0016_wish_list'),
]
operations = [
migrations.AddField(
model_name='wish_list',
name='is_wished',
field=models.BooleanField(default=False),
),
]
|
[
"django.db.models.BooleanField"
] |
[((335, 369), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (354, 369), False, 'from django.db import migrations, models\n')]
|
"""
Use this script to post-process the predicted softmax segmentation.
This script performs rigid registration of the softmax prediction to the subject space.
@author: <NAME> (<EMAIL>)
"""
import os
from argparse import ArgumentParser
import numpy as np
import nibabel as nib
parser = ArgumentParser()
parser.add_argument('--softmax', required=True,
help='path to the softmax prediction in the template space.')
parser.add_argument('--aff', required=True,
                    help='path to the Affine transformation that was used '
'to go from subject space to template space.')
parser.add_argument('--input_img', required=True,
help='Path to the SRR to preprocess')
parser.add_argument('--output_folder', required=True)
def invert_affine(aff_path, output_dir):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
aff_name = os.path.split(aff_path)[1].replace('.txt', '')
save_inv_aff_path = os.path.join(
output_dir,
'%s_inv.txt' % aff_name,
)
cmd = 'reg_transform -invAff %s %s' % (aff_path, save_inv_aff_path)
os.system(cmd)
return save_inv_aff_path
def warp_softmax(softmax_path, ref_img_path, save_path, aff_path):
# Warp the softmax
cmd = 'reg_resample -ref %s -flo %s -trans %s -res %s -inter 1 -pad 0 -voff' % \
(ref_img_path, softmax_path, aff_path, save_path)
os.system(cmd)
# Fix border effects due to padding with 0 AND change order of channels
softmax_nii = nib.load(save_path)
softmax = softmax_nii.get_fdata().astype(np.float32)
sum_proba = np.sum(softmax, axis=-1)
softmax[:, :, :, 0] += 1. - sum_proba
post_softmax_nii = nib.Nifti1Image(softmax, softmax_nii.affine)
nib.save(post_softmax_nii, save_path)
def main(args):
if not os.path.exists(args.output_folder):
os.mkdir(args.output_folder)
# Compute the inverse affine transform
print('Invert %s' % args.aff)
inv_aff_path = invert_affine(aff_path=args.aff, output_dir=args.output_folder)
print(inv_aff_path)
# Warp the softmax
save_path = os.path.join(args.output_folder, 'softmax.nii.gz')
print('warp %s' % args.softmax)
warp_softmax(
softmax_path=args.softmax,
ref_img_path=args.input_img,
save_path=save_path,
aff_path=inv_aff_path,
)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
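# Example invocation (file names are placeholders, not from the original project);
# requires the NiftyReg command line tools (reg_transform, reg_resample) on the PATH:
#   python warp_softmax_to_subject.py \
#       --softmax pred/softmax_template.nii.gz \
#       --aff transforms/subject_to_template_affine.txt \
#       --input_img subject/srr.nii.gz \
#       --output_folder postprocessed/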
|
[
"nibabel.Nifti1Image",
"os.mkdir",
"numpy.sum",
"argparse.ArgumentParser",
"nibabel.load",
"os.path.exists",
"os.system",
"nibabel.save",
"os.path.split",
"os.path.join"
] |
[((285, 301), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (299, 301), False, 'from argparse import ArgumentParser\n'), ((982, 1031), 'os.path.join', 'os.path.join', (['output_dir', "('%s_inv.txt' % aff_name)"], {}), "(output_dir, '%s_inv.txt' % aff_name)\n", (994, 1031), False, 'import os\n'), ((1131, 1145), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1140, 1145), False, 'import os\n'), ((1414, 1428), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1423, 1428), False, 'import os\n'), ((1524, 1543), 'nibabel.load', 'nib.load', (['save_path'], {}), '(save_path)\n', (1532, 1543), True, 'import nibabel as nib\n'), ((1617, 1641), 'numpy.sum', 'np.sum', (['softmax'], {'axis': '(-1)'}), '(softmax, axis=-1)\n', (1623, 1641), True, 'import numpy as np\n'), ((1707, 1751), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['softmax', 'softmax_nii.affine'], {}), '(softmax, softmax_nii.affine)\n', (1722, 1751), True, 'import nibabel as nib\n'), ((1756, 1793), 'nibabel.save', 'nib.save', (['post_softmax_nii', 'save_path'], {}), '(post_softmax_nii, save_path)\n', (1764, 1793), True, 'import nibabel as nib\n'), ((2121, 2171), 'os.path.join', 'os.path.join', (['args.output_folder', '"""softmax.nii.gz"""'], {}), "(args.output_folder, 'softmax.nii.gz')\n", (2133, 2171), False, 'import os\n'), ((839, 865), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (853, 865), False, 'import os\n'), ((875, 895), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (883, 895), False, 'import os\n'), ((1823, 1857), 'os.path.exists', 'os.path.exists', (['args.output_folder'], {}), '(args.output_folder)\n', (1837, 1857), False, 'import os\n'), ((1867, 1895), 'os.mkdir', 'os.mkdir', (['args.output_folder'], {}), '(args.output_folder)\n', (1875, 1895), False, 'import os\n'), ((911, 934), 'os.path.split', 'os.path.split', (['aff_path'], {}), '(aff_path)\n', (924, 934), False, 'import os\n')]
|
"""A minimal sample script for illustration of basic usage of NCE module"""
import torch
from nce import IndexLinear
class_freq = [0, 2, 2, 3, 4, 5, 6] # unigram class frequency counts (normalized into a noise distribution below)
freq_count = torch.FloatTensor(class_freq)
print("total counts for all tokens:", freq_count.sum())
noise = freq_count / freq_count.sum()
# IndexLinear inherits from the NCELoss class
nce_linear = IndexLinear(
embedding_dim=100, # input dim
num_classes=300000, # output dim
noise=noise,
)
# here, `input` is a stand-in for what would come out of the embedding layer
input = torch.Tensor(200, 100) # [batch, emb_dim]
# target is all ones here, but in a real task it should be the ids of the corresponding correct tokens
target = torch.ones(200, 1).long() # [batch, 1]
# training mode
loss = nce_linear(target, input).mean()
print(loss.item())
# evaluation mode for fast probability computation
nce_linear.eval()
prob = nce_linear(target, input).mean()
print(prob.item())
|
[
"torch.ones",
"torch.Tensor",
"torch.FloatTensor",
"nce.IndexLinear"
] |
[((199, 228), 'torch.FloatTensor', 'torch.FloatTensor', (['class_freq'], {}), '(class_freq)\n', (216, 228), False, 'import torch\n'), ((364, 427), 'nce.IndexLinear', 'IndexLinear', ([], {'embedding_dim': '(100)', 'num_classes': '(300000)', 'noise': 'noise'}), '(embedding_dim=100, num_classes=300000, noise=noise)\n', (375, 427), False, 'from nce import IndexLinear\n'), ((510, 532), 'torch.Tensor', 'torch.Tensor', (['(200)', '(100)'], {}), '(200, 100)\n', (522, 532), False, 'import torch\n'), ((609, 627), 'torch.ones', 'torch.ones', (['(200)', '(1)'], {}), '(200, 1)\n', (619, 627), False, 'import torch\n')]
|
"""this file is aimed to generate a small datasets for test"""
import json
f = open("/home/ayb/UVM_Datasets/voc_test3.json", "r")
line = f.readline()
f.close()
dic = eval(line)
images = dic['images']
new_images=[]
for image in images:
if "ten" in image['file_name']:
continue
else:
new_images.append(image)
image_id = []
annotations = dic['annotations']
new_annotations = []
for image in new_images:
# print(image)
image_id.append(image['id'])
for annotation in annotations:
if annotation['image_id'] in image_id:
new_annotations.append(annotation)
dic["images"] = new_images
dic["annotations"] = new_annotations
f1 = open("/home/ayb/UVM_Datasets/voc_test_not_ten.json", "w")
dic_json = json.dumps(dic)
f1.write(str(dic_json))
f1.close()
|
[
"json.dumps"
] |
[((732, 747), 'json.dumps', 'json.dumps', (['dic'], {}), '(dic)\n', (742, 747), False, 'import json\n')]
|
import os
from flask import Flask, g, render_template
import flask_sijax
path = os.path.join('.', os.path.dirname(__file__), 'static/js/sijax/')
app = Flask(__name__)
app.config['SIJAX_STATIC_PATH'] = path
app.config['SIJAX_JSON_URI'] = '/static/js/sijax/json2.js'
flask_sijax.Sijax(app)
@app.route('/')
def index():
return 'Index'
@flask_sijax.route(app, '/hello')
def hello():
def say_hi(obj_response):
obj_response.alert('Hi there!')
if g.sijax.is_sijax_request:
g.sijax.register_callback('say_hi', say_hi)
return g.sijax.process_request()
    return render_template('sijaxexample.html')
if __name__ == '__main__':
app.run(debug = True)
|
[
"os.path.dirname",
"flask.Flask",
"flask.g.sijax.register_callback",
"flask.g.sijax.process_request"
] |
[((151, 166), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (156, 166), False, 'from flask import Flask, g\n'), ((97, 122), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (112, 122), False, 'import os\n'), ((506, 549), 'flask.g.sijax.register_callback', 'g.sijax.register_callback', (['"""say_hi"""', 'say_hi'], {}), "('say_hi', say_hi)\n", (531, 549), False, 'from flask import Flask, g\n'), ((564, 589), 'flask.g.sijax.process_request', 'g.sijax.process_request', ([], {}), '()\n', (587, 589), False, 'from flask import Flask, g\n')]
|
import re
import sys
from wtforms.form import FormMeta
class WTFormsDynamicFields():
""" Add dynamic (set) fields to a WTForm.
Instantiating this class will merely create a configuration
dictionary on which you can add fields and validators using
the designated methods "add_field" and "add_validator".
Calling the "process" method will take care of
actually applying the build configuration to the WTForm form.
This method will take a WTForm form object and attach new
fields to it according to a match between what is in the POST
and what is defined in the build configuration dictionary.
It has the added ability to process sets of fields that
are suffixed with the convention of '_X' where X is a number.
For ease of configuration, these set names will be traced back
to their canonical name so that each of these fields only have
to be defined once in the configuration.
Inside the configuration there is the ability to reference
other fields within the validator arguments with the convention
of surrounding it with % signs. Fields that belong to a set
will be automatically suffixed with their set number (_X)
when they are bound to the validator.
The latter brings the power to reference set fields with their
canonical name without needing to care about the set number that
will be used later on when injecting them in the DOM.
"""
def __init__(self, flask_wtf=False):
""" Class init.
:param flask_wtf: Is this form a Flask WTF or a plain WTF instance?
"""
self._dyn_fields = {}
self.flask_wtf=flask_wtf
def add_field(self, name, label, field_type, *args, **kwargs):
""" Add the field to the internal configuration dictionary. """
if name in self._dyn_fields:
raise AttributeError('Field already added to the form.')
else:
self._dyn_fields[name] = {'label': label, 'type': field_type,
'args': args, 'kwargs': kwargs}
def add_validator(self, name, validator, *args, **kwargs):
""" Add the validator to the internal configuration dictionary.
:param name:
The field machine name to apply the validator on
:param validator:
The WTForms validator object
The rest are optional arguments and keyword arguments that
belong to the validator. We let them simply pass through
to be checked and bound later.
"""
if name in self._dyn_fields:
if 'validators' in self._dyn_fields[name]:
self._dyn_fields[name]['validators'].append(validator)
self._dyn_fields[name][validator.__name__] = {}
if args:
self._dyn_fields[name][validator.__name__]['args'] = args
if kwargs:
self._dyn_fields[name][validator.__name__]['kwargs'] = kwargs
else:
self._dyn_fields[name]['validators'] = []
self.add_validator(name, validator, *args, **kwargs)
else:
raise AttributeError('Field "{0}" does not exist. '
'Did you forget to add it?'.format(name))
@staticmethod
def iteritems(dict):
""" Refusing to use a possible memory hugging
Python2 .items() method. So for providing
both Python2 and 3 support, setting up iteritems()
as either items() in 3 or iteritems() in 2.
"""
if sys.version_info[0] >= 3:
return dict.items()
else:
return dict.iteritems()
def process(self, form, post):
""" Process the given WTForm Form object.
        Iterate over the POST values and check each field
against the configuration that was made.
For each field that is valid, check all the validator
parameters for possible %field% replacement, then bind
these parameters to their validator.
Finally, add the field together with their validators
to the form.
:param form:
A valid WTForm Form object
:param post:
A MultiDict with the POST variables
"""
if not isinstance(form, FormMeta):
raise TypeError('Given form is not a valid WTForm.')
re_field_name = re.compile(r'\%([a-zA-Z0-9_]*)\%')
class F(form):
pass
for field, data in post.iteritems():
if field in F():
# Skip it if the POST field is one of the standard form fields.
continue
else:
if field in self._dyn_fields:
# If we can find the field name directly, it means the field
# is not a set so just set the canonical name and go on.
field_cname = field
# Since we are not in a set, (re)set the current set.
current_set_number = None
elif (field.split('_')[-1].isdigit()
and field[:-(len(field.split('_')[-1]))-1] in self._dyn_fields.keys()):
# If the field can be split on underscore characters,
# the last part contains only digits and the
# everything *but* the last part is found in the
# field configuration, we are good to go.
# (Cowardly refusing to use regex here).
field_cname = field[:-(len(field.split('_')[-1]))-1]
# Since we apparently are in a set, remember the
# the set number we are at.
current_set_number = str(field.split('_')[-1])
else:
# The field did not match to a canonical name
# from the fields dictionary or the name
# was malformed, throw it out.
continue
# Since the field seems to be a valid one, let us
# prepare the validator arguments and, if we are in a set
# replace the %field_name% convention where we find it.
validators = []
if 'validators' in self._dyn_fields[field_cname]:
for validator in self._dyn_fields[field_cname]['validators']:
args = []
kwargs = {}
if 'args' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
args = self._dyn_fields[field_cname]\
[validator.__name__]['args']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for arg in self._dyn_fields[field_cname]\
[validator.__name__]['args']:
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
args.append(arg)
if 'kwargs' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
kwargs = self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for key, arg in self.iteritems(self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']):
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
kwargs[key] = arg
# Finally, bind arguments to the validator
# and add it to the list
validators.append(validator(*args, **kwargs))
# The field is setup, it is time to add it to the form.
field_type = self._dyn_fields[field_cname]['type']
field_label = self._dyn_fields[field_cname]['label']
field_args = self._dyn_fields[field_cname]['args']
field_kwargs = self._dyn_fields[field_cname]['kwargs']
setattr(F, field, field_type(field_label,
validators=validators,
*field_args,
**field_kwargs))
# Create an instance of the form with the newly
# created fields and give it back to the caller.
if self.flask_wtf:
# Flask WTF overrides the form initialization
# and already injects the POST variables.
form = F()
else:
form = F(post)
return form
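# --- Minimal usage sketch (illustrative only, not part of the original module) ---
# Assumes WTForms is installed; the form, field and validator names below are
# made-up examples. process() expects a MultiDict-like object exposing
# iteritems() and getlist(), so a tiny stand-in is used here.
if __name__ == '__main__':
    from wtforms import Form, StringField, validators

    class PersonForm(Form):
        pass  # static fields would be declared here

    class FakePost(dict):
        def iteritems(self):
            return iter(self.items())

        def getlist(self, key):
            return [self[key]] if key in self else []

    dynamic = WTFormsDynamicFields()
    dynamic.add_field('email', 'Email address', StringField)
    dynamic.add_validator('email', validators.InputRequired,
                          message='%email% is missing')

    # The POST contained two fields of the 'email' set (canonical name 'email').
    post = FakePost({'email_1': 'a@example.com', 'email_2': ''})
    form = dynamic.process(PersonForm, post)
    print(sorted(field.name for field in form))  # ['email_1', 'email_2']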
|
[
"re.compile"
] |
[((4393, 4428), 're.compile', 're.compile', (['"""\\\\%([a-zA-Z0-9_]*)\\\\%"""'], {}), "('\\\\%([a-zA-Z0-9_]*)\\\\%')\n", (4403, 4428), False, 'import re\n')]
|
""" Construct dataset """
import sys
import math
import pandas as pd
import numpy as np
import csv
def calc_gaps(station):
"""Calculate gaps in time series"""
df = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
df = df.set_index(['Date'])
df.index = pd.to_datetime(df.index)
dates = df.index.values
first_date = dates[0]
last_date = dates[-1]
print('Data from {0} to {1}'.format(first_date, last_date))
total_range = last_date - first_date
total_range_seconds = total_range / np.timedelta64(1, 's')
last_read_date = first_date
gaps = []
total_gap = 0;
for d in dates:
diff = d - last_read_date
seconds = diff / np.timedelta64(1, 's')
hours = diff / np.timedelta64(1, 'h')
if hours > 72: # met stations
# if hours > 24: # flow stations
total_gap = total_gap + seconds
gaps.append(seconds)
last_read_date = d
print('Number of gaps {0}'.format(len(gaps)))
years = math.floor(total_gap / 3600 / 24 / 365.25)
days = math.floor((total_gap / 3600 / 24 % 365.25))
print('Total gap {0} years'.format(total_gap / 3600 / 24 / 365.25))
print('Total gap {0} years {1} days'.format(years, days))
total_left = total_range_seconds - total_gap
years_left = math.floor(total_left / 3600 / 24 / 365.25)
days_left = math.floor((total_left / 3600 / 24 % 365.25))
print('Total left {0} years'.format(total_left / 3600 / 24 / 365.25))
print('Total left {0} years {1} days'.format(years_left, days_left))
# gap_file = '{0}-gaps.txt'.format(station)
# np.savetxt(gap_file, gaps, delimiter=',', fmt="%s")
def calc_histogram(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 10)].count()
i2 = df[(df['Value'] > 10) & (df['Value'] <= 50)].count()
i3 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i4 = df[(df['Value'] > 100) & (df['Value'] <= 200)].count()
i5 = df[(df['Value'] > 200) & (df['Value'] <= 300)].count()
i6 = df[(df['Value'] > 300) & (df['Value'] <= 400)].count()
i7 = df[(df['Value'] > 400) & (df['Value'] <= 500)].count()
i8 = df[(df['Value'] > 500) & (df['Value'] <= 1000)].count()
i9 = df[(df['Value'] > 1000)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 10: {0}'.format(i1['Value']/total_count['Value']))
print(' 10 - 50: {0}'.format(i2['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i3['Value']/total_count['Value']))
print('100 - 200: {0}'.format(i4['Value']/total_count['Value']))
print('200 - 300: {0}'.format(i5['Value']/total_count['Value']))
print('300 - 400: {0}'.format(i6['Value']/total_count['Value']))
print('400 - 500: {0}'.format(i7['Value']/total_count['Value']))
print('500 - 1000: {0}'.format(i8['Value']/total_count['Value']))
print(' > 1000: {0}'.format(i9['Value']/total_count['Value']))
def calc_histogram4(station1, station2):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram3(station1, station2, station3):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
raw3 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station3), parse_dates=['Date'])
raw3 = raw3.set_index(['Date'])
raw3.index = pd.to_datetime(raw3.index)
df3 = raw3.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value'] + df3['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram2(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 5)].count()
i2 = df[(df['Value'] > 5) & (df['Value'] <= 10)].count()
i3 = df[(df['Value'] > 10) & (df['Value'] <= 20)].count()
i4 = df[(df['Value'] > 20) & (df['Value'] <= 50)].count()
i5 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i6 = df[(df['Value'] > 100)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 5: {0}'.format(i1['Value']/total_count['Value']))
print(' 5 - 10: {0}'.format(i2['Value']/total_count['Value']))
print(' 10 - 20: {0}'.format(i3['Value']/total_count['Value']))
print(' 20 - 50: {0}'.format(i4['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i5['Value']/total_count['Value']))
print(' > 100: {0}'.format(i6['Value']/total_count['Value']))
def median_sampling_rate(station):
"""Get median over year sampling rate"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('Y').count()
df.to_csv('{0}_sample_count.csv'.format(station))
def resample(station):
"""Resample station data"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
df = df.round({'Value': 0})
df.to_csv('{0}_resampled.csv'.format(station))
if __name__ == '__main__':
station = sys.argv[1]
calc_gaps(station)
#calc_histogram(station)
#calc_histogram2(station)
#calc_histogram3('D7H014Z', 'D7H015Z', 'D7H016Z')
#calc_histogram4('D7H008', 'D7H017PLUS')
#median_sampling_rate(station)
#resample(station)
|
[
"numpy.timedelta64",
"pandas.to_datetime",
"math.floor"
] |
[((307, 331), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (321, 331), True, 'import pandas as pd\n'), ((1041, 1083), 'math.floor', 'math.floor', (['(total_gap / 3600 / 24 / 365.25)'], {}), '(total_gap / 3600 / 24 / 365.25)\n', (1051, 1083), False, 'import math\n'), ((1095, 1137), 'math.floor', 'math.floor', (['(total_gap / 3600 / 24 % 365.25)'], {}), '(total_gap / 3600 / 24 % 365.25)\n', (1105, 1137), False, 'import math\n'), ((1341, 1384), 'math.floor', 'math.floor', (['(total_left / 3600 / 24 / 365.25)'], {}), '(total_left / 3600 / 24 / 365.25)\n', (1351, 1384), False, 'import math\n'), ((1401, 1444), 'math.floor', 'math.floor', (['(total_left / 3600 / 24 % 365.25)'], {}), '(total_left / 3600 / 24 % 365.25)\n', (1411, 1444), False, 'import math\n'), ((1900, 1925), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (1914, 1925), True, 'import pandas as pd\n'), ((3577, 3603), 'pandas.to_datetime', 'pd.to_datetime', (['raw1.index'], {}), '(raw1.index)\n', (3591, 3603), True, 'import pandas as pd\n'), ((3792, 3818), 'pandas.to_datetime', 'pd.to_datetime', (['raw2.index'], {}), '(raw2.index)\n', (3806, 3818), True, 'import pandas as pd\n'), ((5561, 5587), 'pandas.to_datetime', 'pd.to_datetime', (['raw1.index'], {}), '(raw1.index)\n', (5575, 5587), True, 'import pandas as pd\n'), ((5776, 5802), 'pandas.to_datetime', 'pd.to_datetime', (['raw2.index'], {}), '(raw2.index)\n', (5790, 5802), True, 'import pandas as pd\n'), ((5991, 6017), 'pandas.to_datetime', 'pd.to_datetime', (['raw3.index'], {}), '(raw3.index)\n', (6005, 6017), True, 'import pandas as pd\n'), ((7749, 7774), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (7763, 7774), True, 'import pandas as pd\n'), ((9021, 9046), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (9035, 9046), True, 'import pandas as pd\n'), ((9337, 9362), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (9351, 9362), True, 'import pandas as pd\n'), ((558, 580), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (572, 580), True, 'import numpy as np\n'), ((726, 748), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (740, 748), True, 'import numpy as np\n'), ((772, 794), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (786, 794), True, 'import numpy as np\n')]
|
from app import app, socketio
if __name__ == '__main__':
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
socketio.run(app)
|
[
"app.socketio.run"
] |
[((146, 163), 'app.socketio.run', 'socketio.run', (['app'], {}), '(app)\n', (158, 163), False, 'from app import app, socketio\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import keras
import keras.layers as klayers
import time_series as tsutils
import processing
import metrics
class ModelBase(object):
# Required 'context' information for a model
input_window = None
# How many point the model can predict for a single given context
output_window = None
# How output is shifted w.r.t. to input window
offset = 1
class Model(ModelBase):
def __init__(self,
input_shape: tuple = (5, 1),
outputs: int = 1):
self.input_window = input_shape[0]
self.output_window = outputs
self.offset = outputs
model = keras.Sequential()
model.add(klayers.Conv1D(10, input_shape=input_shape, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Conv1D(10, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Conv1D(10, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Flatten())
model.add(klayers.Dense(outputs))
#model.add(klayers.Dense(10, input_shape=input_shape))
#model.add(klayers.Dense(outputs))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
self.model = model
def predict(self, x, *args, **kwargs):
return self.model.predict(x, *args, **kwargs)
def train(self, x, y, *args, **kwargs):
self.model.fit(x, y, *args, **kwargs)
def main():
path = 'D:\\data\\M3\\M3Other\\N2836.csv'
data = np.genfromtxt(path)
print('Data len: {0}'.format(len(data)))
predict_points = 8
model = Model()
ts = tsutils.TimeSeries(data, test_size=predict_points, scaler=processing.StandardScaler())
x_train, y_train, t_train = ts.train_data(input_window=model.input_window, output_window=model.output_window, expand=True)
model.train(x_train, y_train, epochs=200)
#x_test, y_test, t_test = ts.train_data(input_window=model.input_window, output_window=model.output_window)
ctx = np.expand_dims(ts.get_test_context(model.input_window, expand=True), axis=0)
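    # Free-run forecast: the model's own predictions are fed back as context for each further step (assumed behaviour of free_run_batch).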
y_pred = tsutils.free_run_batch(model.predict, ctx, predict_points, ts, batch_size=1)
y_true = ts.get_test_data()
y_pred_flat = ts.inverse_y(np.squeeze(y_pred))
y_true_flat = ts.inverse_y(np.squeeze(y_true))
print(metrics.evaluate(y_true_flat, y_pred_flat, metrics=('smape', 'mae', 'umbrae')))
'''
x_all, y_all, t_all = ts.train_data(input_window=model.input_window, output_window=model.output_window)
y_all_pred = model.predict(x_all)
t_all_flat = ts.inverse_y(np.squeeze(t_all))
y_all_flat = ts.inverse_y(np.squeeze(y_all))
y_pred_pred_flat = ts.inverse_y(np.squeeze(y_all_pred))
plt.plot(t_all_flat, y_all_flat)
plt.plot(t_all_flat, y_pred_pred_flat)
plt.show()
'''
#y_free_run_flat = np.squeeze(predictions)
#plt.plot(np.reshape(y_all, (-1, )))
#plt.plot(np.concatenate((y_pred_flat, y_free_run_flat)))
#plt.show()
if __name__ == '__main__':
main()
|
[
"time_series.free_run_batch",
"processing.StandardScaler",
"keras.Sequential",
"keras.layers.Flatten",
"numpy.genfromtxt",
"keras.layers.Conv1D",
"metrics.evaluate",
"keras.layers.Dense",
"numpy.squeeze"
] |
[((1529, 1548), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {}), '(path)\n', (1542, 1548), True, 'import numpy as np\n'), ((2123, 2199), 'time_series.free_run_batch', 'tsutils.free_run_batch', (['model.predict', 'ctx', 'predict_points', 'ts'], {'batch_size': '(1)'}), '(model.predict, ctx, predict_points, ts, batch_size=1)\n', (2145, 2199), True, 'import time_series as tsutils\n'), ((680, 698), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (696, 698), False, 'import keras\n'), ((2264, 2282), 'numpy.squeeze', 'np.squeeze', (['y_pred'], {}), '(y_pred)\n', (2274, 2282), True, 'import numpy as np\n'), ((2315, 2333), 'numpy.squeeze', 'np.squeeze', (['y_true'], {}), '(y_true)\n', (2325, 2333), True, 'import numpy as np\n'), ((2346, 2424), 'metrics.evaluate', 'metrics.evaluate', (['y_true_flat', 'y_pred_flat'], {'metrics': "('smape', 'mae', 'umbrae')"}), "(y_true_flat, y_pred_flat, metrics=('smape', 'mae', 'umbrae'))\n", (2362, 2424), False, 'import metrics\n'), ((717, 814), 'keras.layers.Conv1D', 'klayers.Conv1D', (['(10)'], {'input_shape': 'input_shape', 'padding': '"""same"""', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(10, input_shape=input_shape, padding='same', kernel_size=3,\n activation='relu')\n", (731, 814), True, 'import keras.layers as klayers\n'), ((830, 898), 'keras.layers.Conv1D', 'klayers.Conv1D', (['(10)'], {'padding': '"""same"""', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(10, padding='same', kernel_size=3, activation='relu')\n", (844, 898), True, 'import keras.layers as klayers\n'), ((918, 986), 'keras.layers.Conv1D', 'klayers.Conv1D', (['(10)'], {'padding': '"""same"""', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(10, padding='same', kernel_size=3, activation='relu')\n", (932, 986), True, 'import keras.layers as klayers\n'), ((1006, 1023), 'keras.layers.Flatten', 'klayers.Flatten', ([], {}), '()\n', (1021, 1023), True, 'import keras.layers as klayers\n'), ((1043, 1065), 'keras.layers.Dense', 'klayers.Dense', (['outputs'], {}), '(outputs)\n', (1056, 1065), True, 'import keras.layers as klayers\n'), ((1706, 1733), 'processing.StandardScaler', 'processing.StandardScaler', ([], {}), '()\n', (1731, 1733), False, 'import processing\n')]
|
"""
Contains styling utilities for tkinter widgets.
Some features include:
- a hierarchical styling system for the non-ttk widgets;
- a collection of colour constants;
- and reasonable cross-platform named fonts.
"""
import tkinter as tk
import tkinter.font as tkfont
from contextlib import contextmanager
# Colour constants:
GRAY_SCALE_0 = "#000000"
GRAY_SCALE_1 = "#111111"
GRAY_SCALE_2 = "#222222"
GRAY_SCALE_3 = "#333333"
GRAY_SCALE_4 = "#444444"
GRAY_SCALE_5 = "#555555"
GRAY_SCALE_6 = "#666666"
GRAY_SCALE_7 = "#777777"
GRAY_SCALE_8 = "#888888"
GRAY_SCALE_9 = "#999999"
GRAY_SCALE_A = "#AAAAAA"
GRAY_SCALE_B = "#BBBBBB"
GRAY_SCALE_C = "#CCCCCC"
GRAY_SCALE_D = "#DDDDDD"
GRAY_SCALE_E = "#EEEEEE"
GRAY_SCALE_F = "#FFFFFF"
MUTE_BLUE = "#333355"
MUTE_GREEN = "#335533"
MUTE_RED = "#663333"
MUTE_YELLOW = "#888833"
MUTE_TURQUOISE = "#335555"
MUTE_PURPLE = "#553377"
MUTE_PINK = "#663366"
MUTE_ORANGE = "#774433"
RED_TEAM = "#992222"
BLUE_TEAM = "#222299"
UNKNOWN_TEAM = GRAY_SCALE_B
BOOL_TRUE = MUTE_GREEN
BOOL_FALSE = MUTE_RED
# Named fonts:
FONT_MONOSPACE_TITLE = None
"""
Tkinter named font with these properties:
- size: 10
- family: "Courier"
- weight: BOLD
``init_fonts`` must be called to initialize this font.
"""
FONT_MONOSPACE_NORMAL = None
"""
Tkinter named font with these properties:
- size: 8
- family: "Courier"
``init_fonts`` must be called to initialize this font.
"""
FONT_SERIF_TITLE = None
"""
Tkinter named font with these properties:
- size: 10
- family: "Times"
- weight: BOLD
``init_fonts`` must be called to initialize this font.
"""
FONT_SERIF_NORMAL = None
"""
Tkinter named font with these properties:
- size: 8
- family: "Times"
``init_fonts`` must be called to initialize this font.
"""
FONT_SANS_SERIF_TITLE = None
"""
Tkinter named font with these properties:
- size: 10
- family: "Helvetica"
- weight: BOLD
``init_fonts`` must be called to initialize this font.
"""
FONT_SANS_SERIF_NORMAL = None
"""
Tkinter named font with these properties:
- size: 8
- family: "Helvetica"
``init_fonts`` must be called to initialize this font.
"""
# Backup copies of "normal" tkinter widgets:
_tkButton = tk.Button
_tkCanvas = tk.Canvas
_tkCheckbutton = tk.Checkbutton
_tkEntry = tk.Entry
_tkFrame = tk.Frame
_tkLabel = tk.Label
_tkLabelFrame = tk.LabelFrame
_tkListbox = tk.Listbox
_tkMenu = tk.Menu
_tkPanedWindow = tk.PanedWindow
_tkRadiobutton = tk.Radiobutton
_tkScale = tk.Scale
_tkScrollbar = tk.Scrollbar
_tkSpinbox = tk.Spinbox
_tkText = tk.Text
_tkToplevel = tk.Toplevel
_global_style = None
"""A global ``Style`` object used by the ``stylize`` context manager."""
class StyleableMixin:
"""
Mixin class used to make a widget "styleable". This class works in cooperation with the ``Style`` class. Styleable
widgets should never use their ``widget.configure`` method to set styles in their ``StyleableMixin.STYLED_OPTS``;
``StyleableMixin.apply_style`` should be used instead. (Although configuring "functional" styles through
``widget.configure`` is perfect fine.)
There are four sources of style options and they work on a priority system (higher number means higher priority):
1. ``StyleableMixin.TK_DEFAULT_STYLES``
2. ``StyleableMixin.DEFAULT_STYLES``
3. A given ``Style`` instance
4. A given dictionary of overrides
"""
STYLED_OPTS = []
"""
A list of strings specifying all the widget options (i.e. the ones that would normally be passed to
``widget.configure(...)``) to be considered for styling; any other options encountered are considered "functional"
and hence won't be regulated in any way by this class. Subclasses should define this.
"""
TK_DEFAULT_STYLES = None
"""
A dictionary of default (platform-specific) styles to revert to if an initially explicitly given style option is
revoked. This dictionary is lazily built for each unique styled class (i.e. a style is added to this dictionary the
first time it changes from its default).
"""
DEFAULT_STYLES = None
"""
A dictionary of default user-defined styles for a given class. Subclasses may define this. One may also set this at
runtime through ``StyleableMixin.set_defaults``, but any changes made won't be taken into effect on instances of
that class until ``StyleableMixin.update_style`` is called.
"""
def __init__(self, master=None, cnf={}, *args, style=None, **overrides):
"""
:param master: The master tkinter widget.
:param cnf: A dictionary of configuration options. This is here to mimic the tkinter widget constructors.
:type cnf: dict
:param args: Additional args for the widget constructor.
:param style: An initial style to employ.
:type style: Style
:param overrides: Style overrides to use.
"""
super().__init__(master, cnf, *args)
self._style = None
"""The widget's current ``Style``."""
self._overrides = None
"""A dictionary of the widget's currently-overridden styles."""
self._assure_default_dicts_exist()
# Initialize the widget's style to the given style or the global style, which may be set by the stylize context
# manger.
self.apply_style(style or _global_style, **overrides)
def apply_style(self, style=None, *, keep_style=False, keep_overrides=False, **overrides):
"""
Apply the given style with the given overrides.
:param style: The style to employ, or None to clear the current style (if ``keep_style`` is False).
:type style: Style
:param keep_style: If ``style`` is None, setting this will keep the previous style. Does nothing if ``style`` is
given.
:type keep_style: bool
:param keep_overrides: Whether to append the given ``overrides`` to the already existing overridden styles, or
replace them.
:type keep_overrides: bool
:param overrides: Style overrides to use.
"""
# Sort out the functional options from the styled ones.
functional, styled = {}, {}
for k, v in overrides.items():
if k in self.STYLED_OPTS:
styled[k] = v
else:
functional[k] = v
# Directly apply the functional options
self.configure(functional)
if keep_overrides:
self._overrides.update(styled)
else:
self._overrides = styled
if style:
if self._style:
self._style.unregister_styleable(self)
self._style = style
style.register_styleable(self)
elif self._style and not keep_style:
self._style.unregister_styleable(self)
self._style = None
self.update_style()
def update_style(self):
"""Update this widget's styles."""
# Alias TK_DEFAULT_STYLES for conciseness.
tk_defaults = self.__class__.TK_DEFAULT_STYLES
# Start off the styles_dict with a copy of the tk_defaults, since those styles are of lowest priority (1). We
# will update the dict with increasing style priority so that lower priority styles will get overridden.
styles_dict = tk_defaults.copy()
# Update the dict with the class-specific user-provided defaults. (Priority 2)
styles_dict.update(self.__class__.DEFAULT_STYLES)
if self._style:
# Update the dict with the styles from the Style object. (Priority 3)
styles_dict.update(self._style.get_relevant_styles(self))
# Update the dict with the overridden styles. (Priority 4)
styles_dict.update(self._overrides)
# Before we actually configure the widget, save any of the styles set to this widget by default so we may return
# to them if an explicit style option on this widget is removed.
tk_defaults.update((k, self.cget(k)) for k in styles_dict if k not in tk_defaults)
self.configure(styles_dict)
@classmethod
def _assure_default_dicts_exist(cls):
"""
Make sure that this class's ``StyleableMixin.TK_DEFAULT_STYLES`` and ``StyleableMixin.DEFAULT_STYLES`` are
defined (every class needs its own version of these; if they were initialized to an empty dict in
``StyleableMixin`` then all classes would share the same dictionaries).
"""
if cls.TK_DEFAULT_STYLES is None:
cls.TK_DEFAULT_STYLES = {}
if cls.DEFAULT_STYLES is None:
cls.DEFAULT_STYLES = {}
@classmethod
def set_defaults(cls, keep_existing=True, **defaults):
"""
Convenience method to update the default styles for this class.
:param keep_existing: Whether to keep the already existing default styles, or replace them.
:type keep_existing: bool
:param defaults: A dictionary of default styles.
"""
cls._assure_default_dicts_exist()
        if keep_existing:
            cls.DEFAULT_STYLES.update(defaults)
        else:
            cls.DEFAULT_STYLES = defaults
# TODO: Styleable Tk (root)?
class Button(StyleableMixin, tk.Button):
"""Styleable version of ``tkinter.Button``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "overrelief", "justify"]
class Canvas(StyleableMixin, tk.Canvas):
"""Styleable version of ``tkinter.Canvas``."""
STYLED_OPTS = ["bg", "bd", "selectbackground", "selectborderwidth", "selectforeground", "highlightcolor",
"highlightbackground", "highlightthickness", "relief"]
class Checkbutton(StyleableMixin, tk.Checkbutton):
"""Styleable version of ``tkinter.Checkbutton``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "overrelief", "justify",
"indicatoron", "offrelief", "selectcolor"]
class Entry(StyleableMixin, tk.Entry):
"""Styleable version of ``tkinter.Entry``."""
STYLED_OPTS = ["font", "bg", "disabledbackground", "fg", "disabledforeground", "readonlybackground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "justify",
"selectbackground", "selectborderwidth", "selectforeground"]
class Frame(StyleableMixin, tk.Frame):
"""Styleable version of ``tkinter.Frame``."""
STYLED_OPTS = ["bg", "bd", "highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Label(StyleableMixin, tk.Label):
"""Styleable version of ``tkinter.Label``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "justify"]
class LabelFrame(StyleableMixin, tk.LabelFrame):
"""Styleable version of ``tkinter.LabelFrame``."""
STYLED_OPTS = ["font", "bg", "fg", "bd", "highlightcolor", "highlightbackground", "highlightthickness", "relief",
"labelanchor"]
class Listbox(StyleableMixin, tk.Listbox):
"""Styleable version of ``tkinter.Listbox``."""
STYLED_OPTS = ["font", "bg", "activestyle", "fg", "disabledforeground", "bd", "relief", "highlightcolor",
"highlightbackground", "highlightthickness", "selectbackground", "selectborderwidth",
"selectforeground"]
class Menu(StyleableMixin, tk.Menu):
"""Styleable version of ``tkinter.Menu``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"selectcolor", "relief", "activeborderwidth"]
class PanedWindow(StyleableMixin, tk.PanedWindow):
"""Styleable version of ``tkinter.PanedWindow``."""
STYLED_OPTS = ["bg", "bd", "relief", "sashrelief", "showhandle"]
class Radiobutton(StyleableMixin, tk.Radiobutton):
"""Styleable version of ``tkinter.Radiobutton``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "overrelief", "justify",
"indicatoron", "offrelief", "selectcolor"]
class Scale(StyleableMixin, tk.Scale):
"""Styleable version of ``tkinter.Scale``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "bd", "showvalue", "sliderrelief", "troughcolor",
"highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Scrollbar(StyleableMixin, tk.Scrollbar):
"""Styleable version of ``tkinter.Scrollbar``."""
STYLED_OPTS = ["bg", "activebackground", "activerelief", "bd", "elementborderwidth", "troughcolor",
"highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Spinbox(StyleableMixin, tk.Spinbox):
"""Styleable version of ``tkinter.Spinbox``."""
STYLED_OPTS = ["font", "bg", "disabledbackground", "fg", "disabledforeground", "readonlybackground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "justify",
"selectbackground", "selectborderwidth", "selectforeground", "buttonbackground",
"buttondownrelief", "buttonuprelief", "insertbackground", "insertborderwidth"]
class Text(StyleableMixin, tk.Text):
"""Styleable version of ``tkinter.Text``."""
STYLED_OPTS = ["font", "bg", "fg", "bd", "insertbackground", "insertborderwidth", "highlightcolor",
"highlightbackground", "highlightthickness", "relief", "selectbackground", "selectborderwidth",
"selectforeground"]
class Toplevel(StyleableMixin, tk.Toplevel):
"""Styleable version of ``tkinter.Toplevel``."""
STYLED_OPTS = ["bg", "bd", "highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Style:
"""
A dictionary proxy for tkinter widget styles. ``StyleableMixin``s register themselves to ``Style``s so that whenever
a ``Style`` is updated, any registered ``StyleableMixin``s are automatically updated to reflect the changes.
``Style``s employ a parent-child system in which a ``Style`` can have one or more parents to inherit styles from.
When a style is requested from a ``Style`` and cannot be found in said ``Style``'s own styles, the style is looked
for in its ancestors, prioritizing the first ones specified in the constructor. When a ``Style`` is updated, all
child ``Style``s of the changed ``Style`` are recursively informed of the change.
"""
DEFAULTS = {}
"""Global default styles for all ``Style`` objects. This should be set through ``Style.set_defaults``."""
def __init__(self, *parents, **styles):
"""
:param parents: ``Style``s to inherit styles from.
:param styles: Styles to use.
"""
self._dict = styles
"""A dictionary of the styles specific to this ``Style``."""
self._styled = []
"""
A list of registered ``StyleableMixin``s. These are signaled of any changes to this ``Style`` in
``Style._signal_style_changed``.
"""
self._parents = parents
"""A list of this ``Style``'s parent ``Style``s."""
self._children = []
"""
A list of registered child ``Style``s. These are signaled of any changes to this ``Style`` in
``Style._signal_style_changed``.
"""
for parent in parents:
parent._register_child(self)
def register_styleable(self, styleable):
"""
Called by ``StyleableMixin`` objects to receive updates on whenever this style changes. This should not be
called by user code.
:param styleable: The styleable widget to register.
:type styleable: StyleableMixin
"""
self._styled.append(styleable)
def unregister_styleable(self, styleable):
"""
Called by ``StyleableMixin`` objects to stop receiving updates on whenever this style changes. This should not
be called by user code.
:param styleable: The styleable widget to unregister.
:type styleable: StyleableMixin
"""
# This will raise an error if the styleable is not already registered.
self._styled.remove(styleable)
def _register_child(self, style):
"""
Called by child ``Style``s to receive updates on whenever this style changes.
:param style: The child ``Style``.
:type style: Style
"""
self._children.append(style)
# Keep the same naming scheme as tkinter.
def configure(self, **kwargs):
"""
Configure this ``Style``'s styles.
:param kwargs: The styles to add/edit.
"""
self._dict.update(kwargs)
self._signal_style_changed()
config = configure
"""Alias for ``Style.configure``."""
def remove_styles(self, *styles):
"""
Remove the given styles from this ``Style``. This will raise a ``KeyError`` if any of the given style names are
not in this ``Style``.
:param styles: Style names to remove.
"""
for style in styles:
del self._dict[style]
self._signal_style_changed()
def _signal_style_changed(self):
"""
Internal method to update all the ``StyleableMixin`` widgets registered to this ``Style`` and its children.
"""
for s in self._styled:
s.update_style()
for child in self._children:
child._signal_style_changed()
def get_relevant_styles(self, widget):
"""
Determine and return all the styles in this ``Style`` recognized by the given tkinter widget.
:param widget: The tkinter widget to find styles for.
:return: All the styles recognized by the given tkinter widget.
"""
return {k: v for k, v in map(lambda k: (k, self.get_style(k)), widget.keys()) if v is not None}
def get_style(self, key):
"""
Return the style corresponding to the given style name, first checking this ``Style`` and its parents, then
resorting to the global default styles (``Style.DEFAULTS``).
:param key: The style name.
:type key: str
:return: The style corresponding to the given style name or None if it could not be found.
"""
return self._get_style(key) or self.__class__.DEFAULTS.get(key)
def _get_style(self, key):
"""
Attempt to retrieve the given style from this ``Style``'s ``Style._dict``. If that fails, recursively search
this widget's parents.
:param key: The style name.
:type key: str
:return: The style corresponding to the given style name or None if it could not be found.
"""
ret = self._dict.get(key)
if ret is None:
for p in self._parents:
ret = p._get_style(key)
if ret is not None:
break
return ret
@classmethod
def set_defaults(cls, keep_existing=True, **defaults):
"""
Convenience method to update the global default styles (``Style.DEFAULTS``).
:param keep_existing: Whether to keep the already existing default styles, or replace them.
:type keep_existing: bool
:param defaults: A dictionary of styles.
"""
if keep_existing:
cls.DEFAULTS.update(defaults)
else:
cls.DEFAULTS = defaults
def patch_tk_widgets():
"""Monkey patch the tkinter widgets with their styleable equivalents."""
tk.Button = Button
tk.Canvas = Canvas
tk.Checkbutton = Checkbutton
tk.Entry = Entry
tk.Frame = Frame
tk.Label = Label
tk.LabelFrame = LabelFrame
tk.Listbox = Listbox
tk.Menu = Menu
tk.PanedWindow = PanedWindow
tk.Radiobutton = Radiobutton
tk.Scale = Scale
tk.Scrollbar = Scrollbar
tk.Spinbox = Spinbox
tk.Text = Text
tk.Toplevel = Toplevel
def unpatch_tk_widgets():
"""Revert the tkinter widgets back to their defaults after monkey patching them with ``patch_tk_widgets``."""
tk.Button = _tkButton
tk.Canvas = _tkCanvas
tk.Checkbutton = _tkCheckbutton
tk.Entry = _tkEntry
tk.Frame = _tkFrame
tk.Label = _tkLabel
tk.LabelFrame = _tkLabelFrame
tk.Listbox = _tkListbox
tk.Menu = _tkMenu
tk.PanedWindow = _tkPanedWindow
tk.Radiobutton = _tkRadiobutton
tk.Scale = _tkScale
tk.Scrollbar = _tkScrollbar
tk.Spinbox = _tkSpinbox
tk.Text = _tkText
tk.Toplevel = _tkToplevel
@contextmanager
def patch():
"""Context manager to temporarily monkey patch the tkinter widgets with their styleable equivalents."""
try:
patch_tk_widgets()
yield
finally:
unpatch_tk_widgets()
@contextmanager
def stylize(style, **overrides):
"""
Context manager to temporarily apply a global-level style and some overrides. This global-level style will only be
used by ``StyleableMixin``s if they're not explicitly given a ``Style`` object already.
:param style: The style to apply.
:type style: Style
:param overrides: Style overrides to use.
"""
global _global_style
try:
_global_style = Style(style, **overrides) if overrides else style
yield
finally:
_global_style = None
def init_fonts(root):
"""
Initialize all the named fonts. This must be called prior to attempting to use any of the named fonts.
:param root: The tkinter root widget.
:type root: tkinter.Tk
"""
global FONT_MONOSPACE_TITLE, FONT_MONOSPACE_NORMAL,\
FONT_SERIF_TITLE, FONT_SERIF_NORMAL,\
FONT_SANS_SERIF_TITLE, FONT_SANS_SERIF_NORMAL
FONT_MONOSPACE_TITLE = tkfont.Font(root, size=10,
name="FONT_MONOSPACE_TITLE",
family="Courier",
weight=tkfont.BOLD)
FONT_MONOSPACE_NORMAL = tkfont.Font(root, size=8,
name="FONT_MONOSPACE_NORMAL",
family="Courier")
FONT_SERIF_TITLE = tkfont.Font(root, size=10,
name="FONT_SERIF_TITLE",
family="Times",
weight=tkfont.BOLD)
FONT_SERIF_NORMAL = tkfont.Font(root, size=8,
name="FONT_SERIF_NORMAL",
family="Times")
FONT_SANS_SERIF_TITLE = tkfont.Font(root, size=10,
name="FONT_SANS_SERIF_TITLE",
family="Helvetica",
weight=tkfont.BOLD)
FONT_SANS_SERIF_NORMAL = tkfont.Font(root, size=8,
name="FONT_SANS_SERIF_NORMAL",
family="Helvetica")
|
[
"tkinter.font.Font"
] |
[((22063, 22160), 'tkinter.font.Font', 'tkfont.Font', (['root'], {'size': '(10)', 'name': '"""FONT_MONOSPACE_TITLE"""', 'family': '"""Courier"""', 'weight': 'tkfont.BOLD'}), "(root, size=10, name='FONT_MONOSPACE_TITLE', family='Courier',\n weight=tkfont.BOLD)\n", (22074, 22160), True, 'import tkinter.font as tkfont\n'), ((22303, 22376), 'tkinter.font.Font', 'tkfont.Font', (['root'], {'size': '(8)', 'name': '"""FONT_MONOSPACE_NORMAL"""', 'family': '"""Courier"""'}), "(root, size=8, name='FONT_MONOSPACE_NORMAL', family='Courier')\n", (22314, 22376), True, 'import tkinter.font as tkfont\n'), ((22481, 22573), 'tkinter.font.Font', 'tkfont.Font', (['root'], {'size': '(10)', 'name': '"""FONT_SERIF_TITLE"""', 'family': '"""Times"""', 'weight': 'tkfont.BOLD'}), "(root, size=10, name='FONT_SERIF_TITLE', family='Times', weight=\n tkfont.BOLD)\n", (22492, 22573), True, 'import tkinter.font as tkfont\n'), ((22699, 22766), 'tkinter.font.Font', 'tkfont.Font', (['root'], {'size': '(8)', 'name': '"""FONT_SERIF_NORMAL"""', 'family': '"""Times"""'}), "(root, size=8, name='FONT_SERIF_NORMAL', family='Times')\n", (22710, 22766), True, 'import tkinter.font as tkfont\n'), ((22868, 22968), 'tkinter.font.Font', 'tkfont.Font', (['root'], {'size': '(10)', 'name': '"""FONT_SANS_SERIF_TITLE"""', 'family': '"""Helvetica"""', 'weight': 'tkfont.BOLD'}), "(root, size=10, name='FONT_SANS_SERIF_TITLE', family='Helvetica',\n weight=tkfont.BOLD)\n", (22879, 22968), True, 'import tkinter.font as tkfont\n'), ((23115, 23191), 'tkinter.font.Font', 'tkfont.Font', (['root'], {'size': '(8)', 'name': '"""FONT_SANS_SERIF_NORMAL"""', 'family': '"""Helvetica"""'}), "(root, size=8, name='FONT_SANS_SERIF_NORMAL', family='Helvetica')\n", (23126, 23191), True, 'import tkinter.font as tkfont\n')]
|
import re
from bson.regex import Regex
def test_qop_not_1(monty_find, mongo_find):
docs = [
{"a": 4},
{"x": 8}
]
spec = {"a": {"$not": {"$eq": 8}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
def test_qop_not_2(monty_find, mongo_find):
docs = [
{"a": 6},
{"a": [6]}
]
spec = {"a": {"$not": {"$eq": 6}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_not_3(monty_find, mongo_find):
docs = [
{"a": [{"b": 8}, {"b": 6}]},
]
spec = {"a.b": {"$not": {"$in": [6]}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_not_4(monty_find, mongo_find):
docs = [
{"a": "apple"},
]
spec = {"a": {"$not": Regex("^a")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_not_5(monty_find, mongo_find):
docs = [
{"a": "apple"},
{"a": "banana"},
]
spec = {"a": {"$not": re.compile("^a")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(monty_c) == next(mongo_c)
|
[
"bson.regex.Regex",
"re.compile"
] |
[((1042, 1053), 'bson.regex.Regex', 'Regex', (['"""^a"""'], {}), "('^a')\n", (1047, 1053), False, 'from bson.regex import Regex\n'), ((1350, 1366), 're.compile', 're.compile', (['"""^a"""'], {}), "('^a')\n", (1360, 1366), False, 'import re\n')]
|
import asar
from rom import Rom
from patchexception import PatchException
import os
import re
class Routine:
incsrc = 'incsrc global_ow_code/defines.asm\nfreecode cleaned\n'
def __init__(self, file):
self.path = file
self.ptr = None
self.name = re.findall(r'\w+\.asm', file)[-1].replace('.asm', '')
with open(self.path, 'r') as r:
self.routine = f'print "$",pc\n{r.read()}\n\n'
def __str__(self):
return self.incsrc + 'parent:\n' + self.routine
def create_macro(self):
return f'macro {self.name}()\n\tJSL {self.ptr}\nendmacro\n'
def create_autoclean(self):
return f'autoclean {self.ptr}\n'
def patch_routine(self, rom: Rom):
with open(f'tmp_{self.name}.asm', 'w') as f:
f.write(str(self))
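        # Assemble the temporary patch with asar; the leading 'print "$",pc' line exposes the routine's address via asar.getprints().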
(success, rom_data) = asar.patch(f'tmp_{self.name}.asm', rom.data)
if success:
rom.data = rom_data
ptrs = asar.getprints()
self.ptr = ptrs[0]
print(f'Routine {self.name} was applied correctly')
else:
print(asar.geterrors())
raise PatchException(f'Routine {self.name} encountered an error while patching')
os.remove(f'tmp_{self.name}.asm')
|
[
"os.remove",
"asar.geterrors",
"asar.patch",
"patchexception.PatchException",
"re.findall",
"asar.getprints"
] |
[((838, 882), 'asar.patch', 'asar.patch', (['f"""tmp_{self.name}.asm"""', 'rom.data'], {}), "(f'tmp_{self.name}.asm', rom.data)\n", (848, 882), False, 'import asar\n'), ((1217, 1250), 'os.remove', 'os.remove', (['f"""tmp_{self.name}.asm"""'], {}), "(f'tmp_{self.name}.asm')\n", (1226, 1250), False, 'import os\n'), ((954, 970), 'asar.getprints', 'asar.getprints', ([], {}), '()\n', (968, 970), False, 'import asar\n'), ((1134, 1208), 'patchexception.PatchException', 'PatchException', (['f"""Routine {self.name} encountered an error while patching"""'], {}), "(f'Routine {self.name} encountered an error while patching')\n", (1148, 1208), False, 'from patchexception import PatchException\n'), ((1098, 1114), 'asar.geterrors', 'asar.geterrors', ([], {}), '()\n', (1112, 1114), False, 'import asar\n'), ((280, 310), 're.findall', 're.findall', (['"""\\\\w+\\\\.asm"""', 'file'], {}), "('\\\\w+\\\\.asm', file)\n", (290, 310), False, 'import re\n')]
|
from pathlib import Path
import aku
from torch import nn, optim
from houttuynia.monitors import get_monitor
from houttuynia.schedules import EpochalSchedule
from houttuynia.nn import Classifier
from houttuynia import log_system, manual_seed, to_device
from houttuynia.datasets import prepare_iris_dataset
from houttuynia.schedule import Moment, Pipeline
from houttuynia.extensions import ClipGradNorm, CommitScalarByMean, Evaluation
from houttuynia.triggers import Periodic
from houttuynia.utils import ensure_output_dir, experiment_hash, options_dump
class IrisEstimator(Classifier):
def __init__(self, in_features: int, num_classes: int, hidden_features: int, dropout: float,
bias: bool, negative_slope: float) -> None:
self.dropout = dropout
self.in_features = in_features
self.num_classes = num_classes
self.hidden_features = hidden_features
self.negative_slope = negative_slope
super(IrisEstimator, self).__init__(estimator=nn.Sequential(
nn.Dropout(dropout),
nn.Linear(in_features, hidden_features, bias),
nn.LeakyReLU(negative_slope=negative_slope, inplace=True),
nn.Linear(hidden_features, hidden_features, bias),
nn.LeakyReLU(negative_slope=negative_slope, inplace=True),
nn.Linear(hidden_features, num_classes, bias),
))
app = aku.App(__file__)
@app.register
def train(hidden_features: int = 100, dropout: float = 0.05,
bias: bool = True, negative_slope: float = 0.05,
seed: int = 42, device: str = 'cpu', batch_size: int = 5, num_epochs: int = 50,
out_dir: Path = Path('../out_dir'), monitor: ('filesystem', 'tensorboard') = 'tensorboard'):
""" train iris classifier
Args:
hidden_features: the size of hidden layers
dropout: the dropout ratio
bias: whether or not use the bias in hidden layers
negative_slope: the ratio of negative part
seed: the random seed number
device: device id
batch_size: the size of each batch
num_epochs: the total numbers of epochs
out_dir: the root path of output
monitor: the type of monitor
"""
options = locals()
experiment_dir = out_dir / experiment_hash(**options)
ensure_output_dir(experiment_dir)
options_dump(experiment_dir, **options)
log_system.notice(f'experiment_dir => {experiment_dir}')
manual_seed(seed)
log_system.notice(f'seed => {seed}')
train, test = prepare_iris_dataset(batch_size)
estimator = IrisEstimator(
in_features=4, dropout=dropout, num_classes=3, hidden_features=hidden_features,
negative_slope=negative_slope, bias=bias
)
optimizer = optim.Adam(estimator.parameters())
monitor = get_monitor(monitor)(log_dir=experiment_dir)
to_device(device, estimator)
schedule = EpochalSchedule(estimator, optimizer, monitor)
schedule.register_extension(Periodic(Moment.AFTER_ITERATION, iteration=5))(CommitScalarByMean(
'criterion', 'acc', chapter='train',
))
schedule.register_extension(Periodic(Moment.AFTER_BACKWARD, iteration=1))(ClipGradNorm(max_norm=4.))
schedule.register_extension(Periodic(Moment.AFTER_EPOCH, epoch=1))(Pipeline(
Evaluation(data_loader=test, chapter='test'),
CommitScalarByMean('criterion', 'acc', chapter='test'),
))
return schedule.run(train, num_epochs)
if __name__ == '__main__':
app.run()
|
[
"torch.nn.Dropout",
"houttuynia.to_device",
"houttuynia.extensions.CommitScalarByMean",
"houttuynia.triggers.Periodic",
"houttuynia.monitors.get_monitor",
"houttuynia.schedules.EpochalSchedule",
"houttuynia.manual_seed",
"houttuynia.extensions.ClipGradNorm",
"torch.nn.Linear",
"pathlib.Path",
"aku.App",
"houttuynia.utils.options_dump",
"houttuynia.utils.ensure_output_dir",
"houttuynia.datasets.prepare_iris_dataset",
"houttuynia.utils.experiment_hash",
"houttuynia.extensions.Evaluation",
"torch.nn.LeakyReLU",
"houttuynia.log_system.notice"
] |
[((1393, 1410), 'aku.App', 'aku.App', (['__file__'], {}), '(__file__)\n', (1400, 1410), False, 'import aku\n'), ((1663, 1681), 'pathlib.Path', 'Path', (['"""../out_dir"""'], {}), "('../out_dir')\n", (1667, 1681), False, 'from pathlib import Path\n'), ((2303, 2336), 'houttuynia.utils.ensure_output_dir', 'ensure_output_dir', (['experiment_dir'], {}), '(experiment_dir)\n', (2320, 2336), False, 'from houttuynia.utils import ensure_output_dir, experiment_hash, options_dump\n'), ((2341, 2380), 'houttuynia.utils.options_dump', 'options_dump', (['experiment_dir'], {}), '(experiment_dir, **options)\n', (2353, 2380), False, 'from houttuynia.utils import ensure_output_dir, experiment_hash, options_dump\n'), ((2385, 2441), 'houttuynia.log_system.notice', 'log_system.notice', (['f"""experiment_dir => {experiment_dir}"""'], {}), "(f'experiment_dir => {experiment_dir}')\n", (2402, 2441), False, 'from houttuynia import log_system, manual_seed, to_device\n'), ((2447, 2464), 'houttuynia.manual_seed', 'manual_seed', (['seed'], {}), '(seed)\n', (2458, 2464), False, 'from houttuynia import log_system, manual_seed, to_device\n'), ((2469, 2505), 'houttuynia.log_system.notice', 'log_system.notice', (['f"""seed => {seed}"""'], {}), "(f'seed => {seed}')\n", (2486, 2505), False, 'from houttuynia import log_system, manual_seed, to_device\n'), ((2525, 2557), 'houttuynia.datasets.prepare_iris_dataset', 'prepare_iris_dataset', (['batch_size'], {}), '(batch_size)\n', (2545, 2557), False, 'from houttuynia.datasets import prepare_iris_dataset\n'), ((2848, 2876), 'houttuynia.to_device', 'to_device', (['device', 'estimator'], {}), '(device, estimator)\n', (2857, 2876), False, 'from houttuynia import log_system, manual_seed, to_device\n'), ((2893, 2939), 'houttuynia.schedules.EpochalSchedule', 'EpochalSchedule', (['estimator', 'optimizer', 'monitor'], {}), '(estimator, optimizer, monitor)\n', (2908, 2939), False, 'from houttuynia.schedules import EpochalSchedule\n'), ((2272, 2298), 'houttuynia.utils.experiment_hash', 'experiment_hash', ([], {}), '(**options)\n', (2287, 2298), False, 'from houttuynia.utils import ensure_output_dir, experiment_hash, options_dump\n'), ((2798, 2818), 'houttuynia.monitors.get_monitor', 'get_monitor', (['monitor'], {}), '(monitor)\n', (2809, 2818), False, 'from houttuynia.monitors import get_monitor\n'), ((3019, 3074), 'houttuynia.extensions.CommitScalarByMean', 'CommitScalarByMean', (['"""criterion"""', '"""acc"""'], {'chapter': '"""train"""'}), "('criterion', 'acc', chapter='train')\n", (3037, 3074), False, 'from houttuynia.extensions import ClipGradNorm, CommitScalarByMean, Evaluation\n'), ((3169, 3195), 'houttuynia.extensions.ClipGradNorm', 'ClipGradNorm', ([], {'max_norm': '(4.0)'}), '(max_norm=4.0)\n', (3181, 3195), False, 'from houttuynia.extensions import ClipGradNorm, CommitScalarByMean, Evaluation\n'), ((2972, 3017), 'houttuynia.triggers.Periodic', 'Periodic', (['Moment.AFTER_ITERATION'], {'iteration': '(5)'}), '(Moment.AFTER_ITERATION, iteration=5)\n', (2980, 3017), False, 'from houttuynia.triggers import Periodic\n'), ((3123, 3167), 'houttuynia.triggers.Periodic', 'Periodic', (['Moment.AFTER_BACKWARD'], {'iteration': '(1)'}), '(Moment.AFTER_BACKWARD, iteration=1)\n', (3131, 3167), False, 'from houttuynia.triggers import Periodic\n'), ((3228, 3265), 'houttuynia.triggers.Periodic', 'Periodic', (['Moment.AFTER_EPOCH'], {'epoch': '(1)'}), '(Moment.AFTER_EPOCH, epoch=1)\n', (3236, 3265), False, 'from houttuynia.triggers import Periodic\n'), ((3285, 3329), 'houttuynia.extensions.Evaluation', 'Evaluation', ([], {'data_loader': 'test', 'chapter': '"""test"""'}), "(data_loader=test, chapter='test')\n", (3295, 3329), False, 'from houttuynia.extensions import ClipGradNorm, CommitScalarByMean, Evaluation\n'), ((3339, 3393), 'houttuynia.extensions.CommitScalarByMean', 'CommitScalarByMean', (['"""criterion"""', '"""acc"""'], {'chapter': '"""test"""'}), "('criterion', 'acc', chapter='test')\n", (3357, 3393), False, 'from houttuynia.extensions import ClipGradNorm, CommitScalarByMean, Evaluation\n'), ((1030, 1049), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1040, 1049), False, 'from torch import nn, optim\n'), ((1063, 1108), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'hidden_features', 'bias'], {}), '(in_features, hidden_features, bias)\n', (1072, 1108), False, 'from torch import nn, optim\n'), ((1122, 1179), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': 'negative_slope', 'inplace': '(True)'}), '(negative_slope=negative_slope, inplace=True)\n', (1134, 1179), False, 'from torch import nn, optim\n'), ((1193, 1242), 'torch.nn.Linear', 'nn.Linear', (['hidden_features', 'hidden_features', 'bias'], {}), '(hidden_features, hidden_features, bias)\n', (1202, 1242), False, 'from torch import nn, optim\n'), ((1256, 1313), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': 'negative_slope', 'inplace': '(True)'}), '(negative_slope=negative_slope, inplace=True)\n', (1268, 1313), False, 'from torch import nn, optim\n'), ((1327, 1372), 'torch.nn.Linear', 'nn.Linear', (['hidden_features', 'num_classes', 'bias'], {}), '(hidden_features, num_classes, bias)\n', (1336, 1372), False, 'from torch import nn, optim\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import json
import tensorflow as tf
import util
import json
except_name = ["公司", "本公司", "该公司", "贵公司", "贵司", "本行", "该行", "本银行", "该集团", "本集团", "集团",
"它", "他们", "他们", "我们", "该股", "其", "自身"]
def check_span(fla_sentences, span):
"""检查span对应词语是否符合要求"""
if "".join(fla_sentences[span[0] - 1]) in ["该", "本", "贵"]: # 对span进行补全
span = (span[0]-1, span[1])
return span
def flatten_sentence(sentences):
"""将多维列表展开"""
return [char for sentence in sentences for char in sentence]
def max_all_count(dict0):
"""获取字典当中的count最大值"""
a = max([(value, key) for key, value in dict0.items()])
return a[0]
def cluster_rule(example):
"""
对模型预测结果进行算法修正
:param example: 一条json数据
:return: 纠正后的predicted_cluster
"""
fla_sentences = flatten_sentence(example["sentences"])
res_clusters = []
com2cluster = {}
except_cluster = {}
for cluster in example["predicted_clusters"]:
res_cluster = []
span_count = {}
span2pos = {}
for span in cluster:
if "".join(fla_sentences[span[0]:span[1] + 1]) in ["公司", "集团"]: # 对缺失字符进行补充
span = check_span(fla_sentences, span)
if "#" in "".join(fla_sentences[span[0]:span[1] + 1]): # 对不合法单词进行过滤
continue
res_cluster.append(span)
word = "".join(fla_sentences[span[0]:span[1] + 1])
span_count[word] = span_count.get(word, 0) + 1
if span2pos.get(word, None) is not None:
span2pos[word].append(span)
else:
span2pos[word] = [span]
com_name = set(span_count.keys())
for ex in except_name:
com_name.discard(ex)
max_name = ""
max_count = 0
        for com in com_name:  # find the most frequent word in the cluster
if span_count[com] > max_count:
max_count = span_count[com]
max_name = com
elif span_count[com] == max_count and len(com) > len(max_name):
max_count = span_count[com]
max_name = com
print("max_name:{}".format(max_name))
        for com in com_name:  # company names
            if com[:2] == max_name[:2]:  # first two characters match, treat the two companies as the same
                continue
            elif len(com) < len(max_name) and com in max_name:  # one name contains the other, treat the two companies as the same
continue
elif len(com) > len(max_name) and max_name in com:
continue
else:
print(com)
# span2pos[com]
                except_cluster[com] = span2pos[com]  # this company name
                for n in span2pos[com]:  # the wrongly predicted spans will be filtered out
res_cluster.remove(n)
if com2cluster.get(max_name, None) is None:
com2cluster[max_name] = res_cluster
else:
print(res_cluster)
com2cluster[max_name].extend(res_cluster)
    for key, value in except_cluster.items():  # this step is very useful
        if com2cluster.get(key, None) is None:
            print("This span will be removed entirely: {}".format(key))
            continue
        else:
            print("{} merged back into another cluster".format(key), value)
com2cluster[key].extend(value)
# res_clusters.append(res_cluster)
for v_cluster in com2cluster.values():
res_clusters.append(v_cluster)
return res_clusters
if __name__ == "__main__":
"""
    Command-line example
python predict.py bert_base conll-2012/tagging_pure/tagging_dev_pos.chinese.128.jsonlines result_of_20.txt
"""
config = util.initialize_from_env()
log_dir = config["log_dir"]
# Input file in .jsonlines format.
    input_filename = sys.argv[2]  # input data path
# Predictions will be written to this file in .jsonlines format.
    output_filename = sys.argv[3]  # output path
model = util.get_model(config)
saver = tf.train.Saver()
with tf.Session() as session:
model.restore(session)
with open(output_filename, "w") as output_file:
with open(input_filename) as input_file:
for example_num, line in enumerate(input_file.readlines()):
example = json.loads(line)
tensorized_example = model.tensorize_example(example, is_training=False)
feed_dict = {i: t for i, t in zip(model.input_tensors, tensorized_example)}
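                    # Run the model to score candidate spans and their antecedents.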
_, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(model.predictions, feed_dict=feed_dict)
predicted_antecedents = model.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
example["predicted_clusters"], mention_to_predict = model.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)
example["top_spans"] = list(zip((int(i) for i in top_span_starts), (int(i) for i in top_span_ends)))
example['head_scores'] = []
example["mention_to_predict"] = str(mention_to_predict)
example["predicted_clusters"] = cluster_rule(example)
output_file.write(str(json.dumps(example, ensure_ascii=False)))
output_file.write("\n")
if example_num % 100 == 0:
print("Decoded {} examples.".format(example_num + 1))
|
[
"json.loads",
"tensorflow.train.Saver",
"tensorflow.Session",
"json.dumps",
"util.get_model",
"util.initialize_from_env"
] |
[((3660, 3686), 'util.initialize_from_env', 'util.initialize_from_env', ([], {}), '()\n', (3684, 3686), False, 'import util\n'), ((3938, 3960), 'util.get_model', 'util.get_model', (['config'], {}), '(config)\n', (3952, 3960), False, 'import util\n'), ((3973, 3989), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3987, 3989), True, 'import tensorflow as tf\n'), ((4000, 4012), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4010, 4012), True, 'import tensorflow as tf\n'), ((4250, 4266), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4260, 4266), False, 'import json\n'), ((5166, 5205), 'json.dumps', 'json.dumps', (['example'], {'ensure_ascii': '(False)'}), '(example, ensure_ascii=False)\n', (5176, 5205), False, 'import json\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility methods related to folders
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import time
import errno
import shutil
import fnmatch
import logging
import tempfile
import traceback
import subprocess
from distutils.dir_util import copy_tree
logger = logging.getLogger('tpDcc-libs-python')
def create_folder(name, directory=None, make_unique=False):
"""
Creates a new folder on the given path and with the given name
:param name: str, name of the new directory
:param directory: str, path to the new directory
:param make_unique: bool, Whether to pad the name with a number to make it unique if the folder is not unique
:return: variant, str || bool, folder name with path or False if the folder creation failed
"""
from tpDcc.libs.python import path, osplatform
full_path = False
if directory is None:
full_path = name
if not name:
full_path = directory
if name and directory:
full_path = path.join_path(directory, name)
if make_unique:
full_path = path.unique_path_name(directory=full_path)
if not full_path:
return False
if path.is_dir(full_path):
return full_path
try:
os.makedirs(full_path)
except Exception:
return False
osplatform.get_permission(full_path)
return full_path
def rename_folder(directory, name, make_unique=False):
"""
Renames given with a new name
:param directory: str, full path to the directory we want to rename
:param name: str, new name of the folder we want to rename
:param make_unique: bool, Whether to add a number to the folder name to make it unique
:return: str, path of the renamed folder
"""
from tpDcc.libs.python import path, osplatform
base_name = path.get_basename(directory=directory)
if base_name == name:
return
parent_path = path.get_dirname(directory=directory)
rename_path = path.join_path(parent_path, name)
if make_unique:
rename_path = path.unique_path_name(directory=rename_path)
if path.exists(rename_path):
return False
try:
osplatform.get_permission(directory)
message = 'rename: {0} >> {1}'.format(directory, rename_path)
logger.info(message)
os.rename(directory, rename_path)
except Exception:
time.sleep(0.1)
try:
os.rename(directory, rename_path)
except Exception:
logger.error('{}'.format(traceback.format_exc()))
return False
return rename_path
def copy_folder(directory, directory_destination, ignore_patterns=[]):
"""
Copy the given directory into a new directory
:param directory: str, directory to copy with full path
:param directory_destination: str, destination directory
:param ignore_patterns: list<str>, extensions we want to ignore when copying folder elements
If ['txt', 'py'] is given all py and text extension files will be ignored during the copy operation
:return: str, destination directory
"""
from tpDcc.libs.python import path, osplatform
if not path.is_dir(directory=directory):
return
if not ignore_patterns:
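        # Prefer a native copy tool (rsync on Linux, robocopy on Windows) when nothing has to be ignored.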
cmd = None
if osplatform.is_linux():
cmd = ['rsync', directory, directory_destination, '-azr']
elif osplatform.is_windows():
cmd = [
'robocopy', directory.replace('/', '\\'), directory_destination.replace('/', '\\'), '/S', '/Z', '/MIR']
if cmd:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if out:
logger.error(err)
else:
shutil.copytree(directory, directory_destination)
else:
shutil.copytree(directory, directory_destination, ignore=shutil.ignore_patterns(ignore_patterns))
return directory_destination
def move_folder(source_directory, target_directory, only_contents=False):
"""
Moves the folder pointed by source_directory under the directory target_directory
:param source_directory: str, folder with full path
:param target_directory: str, path where path1 should be move into
:param only_contents: bool, Whether to move the folder or only its contents
:return: bool, Whether the move operation was successfully
"""
try:
if only_contents or os.path.isdir(target_directory):
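            # The destination already exists (or only the contents are wanted), so merge item by item.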
file_list = os.listdir(source_directory)
for i in file_list:
src = os.path.join(source_directory, i)
dest = os.path.join(target_directory, i)
if os.path.exists(dest):
if os.path.isdir(dest):
move_folder(src, dest)
continue
else:
os.remove(dest)
shutil.move(src, target_directory)
else:
shutil.move(source_directory, target_directory)
except Exception as exc:
logger.warning('Failed to move {} to {}: {}'.format(source_directory, target_directory, exc))
return False
return True
def copy_directory_contents(path1, path2, *args, **kwargs):
"""
Copies all the contents of the given path1 to the folder path2. If path2 directory does not
    exist, it will be created
:param path1: str
:param path2: str
:param args:
:param kwargs:
:return:
"""
try:
copy_tree(path1, path2, *args, **kwargs)
except Exception:
logger.warning('Failed to move contents of {0} to {1}'.format(path1, path2))
return False
return True
def delete_folder(folder_name, directory=None):
"""
Deletes the folder by name in the given directory
:param folder_name: str, name of the folder to delete
:param directory: str, the directory path where the folder is stored
:return: str, folder that was deleted with path
"""
from tpDcc.libs.python import name, path, osplatform
def delete_read_only_error(action, name, exc):
"""
Helper to delete read only files
"""
osplatform.get_permission(name)
action(name)
folder_name = name.clean_file_string(folder_name)
full_path = folder_name
if directory:
full_path = path.join_path(directory, folder_name)
if not path.is_dir(full_path):
return None
try:
shutil.rmtree(full_path, onerror=delete_read_only_error)
except Exception as exc:
logger.warning('Could not remove children of path "{}" | {}'.format(full_path, exc))
return full_path
def clean_folder(directory):
"""
Removes everything in the given directory
:param directory: str
"""
from tpDcc.libs.python import path, fileio, folder
base_name = path.get_basename(directory=directory)
dir_name = path.get_dirname(directory=directory)
if path.is_dir(directory):
try:
files = folder.get_files(directory)
except Exception:
files = list()
for f in files:
fileio.delete_file(f, directory)
delete_folder(base_name, dir_name)
if not path.is_dir(directory):
create_folder(base_name, dir_name)
def get_folder_size(directory, round_value=2, skip_names=None):
"""
Returns the size of the given folder
:param directory: str
:param round_value: int, value to round size to
    :param skip_names: list(str), optional file or folder names to skip
    :return: float, total size of the files in the folder
"""
from tpDcc.libs.python import python, path, fileio
skip_names = python.force_list(skip_names)
size = 0
for root, dirs, files in os.walk(directory):
root_name = path.get_basename(root)
if root_name in skip_names:
continue
for name in files:
if name in skip_names:
continue
size += fileio.get_file_size(path.join_path(root, name), round_value)
return size
def get_size(file_path, round_value=2):
"""
Return the size of the given directory or file path
:param file_path: str
:param round_value: int, value to round size to
:return: int
"""
from tpDcc.libs.python import fileio, path
size = 0
if path.is_dir(file_path):
size = get_folder_size(file_path, round_value)
if path.is_file(file_path):
size = fileio.get_file_size(file_path, round_value)
return size
def get_sub_folders(root_folder, sort=True):
"""
    Returns a list with the names of all the sub folders in a directory
    :param root_folder: str, folder we want to search sub folders for
    :param sort: bool, True to sort the returned folder names alphabetically, False otherwise
    :return: list<str>, sub folder names
"""
if not os.path.exists(root_folder):
        raise RuntimeError('Folder {0} does not exist!'.format(root_folder))
file_names = os.listdir(root_folder)
result = list()
for f in file_names:
if os.path.isdir(os.path.join(os.path.abspath(root_folder), f)):
result.append(f)
if sort:
result.sort()
return result
def get_folders(root_folder, recursive=False, full_path=False):
"""
Get folders found in the root folder
    :param root_folder: str, folder we want to search folders on
    :param recursive: bool, Whether to search in all root folder child folders or not
    :param full_path: bool, Whether to return full folder paths or folder paths relative to the root folder
:return: list<str>
"""
from tpDcc.libs.python import path
found_folders = list()
if not recursive:
try:
found_folders = next(os.walk(root_folder))[1]
except Exception:
pass
else:
try:
for root, dirs, files in os.walk(root_folder):
for d in dirs:
if full_path:
folder_name = path.join_path(root, d)
found_folders.append(folder_name)
else:
folder_name = path.join_path(root, d)
folder_name = os.path.relpath(folder_name, root_folder)
folder_name = path.clean_path(folder_name)
found_folders.append(folder_name)
except Exception:
return found_folders
return found_folders
def get_folders_without_dot_prefix(directory, recursive=False, base_directory=None):
from tpDcc.libs.python import path, version
if not path.exists(directory):
return
found_folders = list()
base_directory = base_directory or directory
folders = get_folders(directory)
for folder in folders:
if folder == 'version':
version = version.VersionFile(directory)
if version.updated_old:
continue
if folder.startswith('.'):
continue
folder_path = path.join_path(directory, folder)
folder_name = path.clean_path(os.path.relpath(folder_path, base_directory))
found_folders.append(folder_name)
if recursive:
sub_folders = get_folders_without_dot_prefix(
folder_path, recursive=recursive, base_directory=base_directory)
found_folders += sub_folders
return found_folders
def get_files(root_folder, full_path=False, recursive=False, pattern="*"):
"""
Returns files found in the given folder
:param root_folder: str, folder we want to search files on
    :param full_path: bool, if True, full paths to the files will be returned otherwise file names will be returned
    :param recursive: bool, Whether to search sub folders recursively or not
    :param pattern: str, fnmatch pattern file names must match (currently only used in recursive searches)
    :return: list<str>
"""
from tpDcc.libs.python import path
if not path.is_dir(root_folder):
return []
# TODO: For now pattern only works in recursive searches. Fix it to work on both
found = list()
if recursive:
for dir_path, dir_names, file_names in os.walk(root_folder):
for file_name in fnmatch.filter(file_names, pattern):
if full_path:
found.append(path.join_path(dir_path, file_name))
else:
found.append(file_name)
else:
files = os.listdir(root_folder)
for f in files:
file_path = path.join_path(root_folder, f)
if path.is_file(file_path=file_path):
if full_path:
found.append(file_path)
else:
found.append(f)
return found
def get_files_and_folders(directory):
"""
Get files and folders found in the given directory
:param directory: str, folder we want to get files and folders from
:return: list<str>
"""
try:
files = os.listdir(directory)
except Exception:
files = list()
return files
def get_files_with_extension(extension, root_directory, full_path=False, recursive=False, filter_text=''):
"""
    Returns files in the given directory with the given extension
:param extension: str, extension to find (.py, .data, etc)
:param root_directory: str, directory path
:param full_path: bool, Whether to return the file path or just the file names
    :param recursive: bool, Whether to search sub folders recursively or not
    :param filter_text: str, optional text the file name must contain (currently only applied in non-recursive searches)
:return: list(str)
"""
found = list()
if not extension.startswith('.'):
extension = '.{}'.format(extension)
if recursive:
for dir_path, dir_names, file_names in os.walk(root_directory):
for file_name in file_names:
filename, found_extension = os.path.splitext(file_name)
if found_extension == '{}'.format(extension):
if not full_path:
found.append(file_name)
else:
found.append(os.path.join(root_directory, file_name))
else:
try:
objs = os.listdir(root_directory)
except Exception:
return found
for filename_and_extension in objs:
filename, found_extension = os.path.splitext(filename_and_extension)
if filter_text and filename_and_extension.find(filter_text) == -1:
continue
if found_extension == extension:
if not full_path:
found.append(filename_and_extension)
else:
found.append(os.path.join(root_directory, filename_and_extension))
return found
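# Illustrative usage sketch (not part of the original module); the directory below is a
# made-up example:
# py_files = get_files_with_extension('py', 'C:/projects/my_project', full_path=True, recursive=True)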
def get_files_date_sorted(root_directory, extension=None, filter_text=''):
"""
    Returns the files found in the given directory, sorted by modification date
:param root_directory: str, directory path
:param extension: str, optional extension to find
:param filter_text: str, optional text filtering
    :return: list(str), list of files in the directory sorted by modification date
"""
from tpDcc.libs.python import fileio
def _get_mtime(fld):
return os.stat(os.path.join(root_directory, fld)).st_mtime
if not extension:
files = fileio.get_files(root_directory, filter_text=filter_text)
else:
files = get_files_with_extension(extension=extension, root_directory=root_directory, filter_text=filter_text)
return list(sorted(files, key=_get_mtime))
def open_folder(path=None):
"""
    Opens a folder in the file explorer in a platform-independent way
    If no path is passed, the current directory will be opened
:param path: str, folder path to open
"""
if path is None:
path = os.path.curdir
if sys.platform == 'darwin':
subprocess.check_call(['open', '--', path])
elif sys.platform == 'linux2':
subprocess.Popen(['xdg-open', path])
elif sys.platform in ['windows', 'win32', 'win64']:
if path.endswith('/'):
path = path[:-1]
new_path = path.replace('/', '\\')
try:
subprocess.check_call(['explorer', new_path], shell=False)
except Exception:
pass
def get_user_folder(absolute=True):
"""
    Get the path to the user folder
    :param absolute: bool, Whether to return the absolute path to the user folder
    :return: str, path to the user folder
"""
from tpDcc.libs.python import path
if absolute:
return path.clean_path(os.path.abspath(os.path.expanduser('~')))
else:
return path.clean_path(os.path.expanduser('~'))
def get_temp_folder():
"""
Get the path to the temp folder
:return: str, path to the temp folder
"""
from tpDcc.libs.python import path
return path.clean_path(tempfile.gettempdir())
def get_current_working_directory():
"""
Returns current working directory
:return: str, path to the current working directory
"""
return os.getcwd()
def get_folders_from_path(path):
"""
    Gets a list with the names of the folders that compose the given path
:param path: str
:return: list<str>
"""
folders = list()
while True:
path, folder = os.path.split(path)
if folder != '':
folders.append(folder)
else:
if path != '':
folders.append(path)
break
folders.reverse()
return folders
def get_folders_date_sorted(root_folder):
"""
    Returns the folders found in the given root directory, sorted by modification date
    :param root_folder: str, directory path
    :return: list(str), list of folders in the directory sorted by modification date
"""
def _get_mtime(fld):
return os.stat(os.path.join(root_folder, fld)).st_mtime
return list(sorted(os.listdir(root_folder), key=_get_mtime))
def ensure_folder_exists(folder_path, permissions=0o755, place_holder=False):
"""
    Checks that the given folder exists. If not, the folder is created.
    :param folder_path: str, folder path to check or create
    :param permissions: int, folder permission mode
    :param place_holder: bool, Whether to create a placeholder text file or not
:raise OSError: raise OSError if the creation of the folder fails
"""
if not os.path.exists(folder_path):
try:
logger.debug('Creating folder {} [{}]'.format(folder_path, permissions))
os.makedirs(folder_path, permissions)
if place_holder:
place_path = os.path.join(folder_path, 'placeholder')
if not os.path.exists(place_path):
with open(place_path, 'wt') as fh:
fh.write('Automatically generated place holder file')
except OSError as err:
if err.errno != errno.EEXIST:
raise
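# Illustrative usage sketch (not part of the original module); the path below is a made-up example:
# ensure_folder_exists('C:/projects/my_project/cache', permissions=0o755, place_holder=True)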
def get_latest_file_at_folder(folder_path, filter_text=''):
"""
    Returns the path of the most recently modified file in the given folder
    :param folder_path: str, folder to search files in
    :param filter_text: str, optional text the file name must contain
    :return: str
"""
from tpDcc.libs.python import path
files = get_files_date_sorted(folder_path, filter_text=filter_text)
if not files:
return None
return path.join_path(folder_path, files[-1])
def walk_level(root_directory, level=None):
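    """
    Generator that behaves like os.walk but optionally limits how deep the walk descends
    :param root_directory: str, directory to walk
    :param level: int or None, maximum depth below root_directory to descend into; None walks the full tree
    :return: generator of (root, dirs, files) tuples, as yielded by os.walk
    """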
root_directory = root_directory.rstrip(os.path.sep)
assert os.path.isdir(root_directory)
if level is None:
for root, dirs, files in os.walk(root_directory):
yield root, dirs, files
else:
num_sep = root_directory.count(os.path.sep)
for root, dirs, files in os.walk(root_directory):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
|
[
"shutil.ignore_patterns",
"os.remove",
"tpDcc.libs.python.path.is_dir",
"tpDcc.libs.python.osplatform.is_windows",
"os.walk",
"tpDcc.libs.python.path.clean_path",
"shutil.rmtree",
"tpDcc.libs.python.name.clean_file_string",
"os.path.join",
"subprocess.check_call",
"os.path.abspath",
"tpDcc.libs.python.path.join_path",
"os.path.exists",
"traceback.format_exc",
"tpDcc.libs.python.path.is_file",
"distutils.dir_util.copy_tree",
"subprocess.Popen",
"os.rename",
"tpDcc.libs.python.path.get_basename",
"time.sleep",
"tpDcc.libs.python.path.replace",
"tpDcc.libs.python.osplatform.get_permission",
"tpDcc.libs.python.fileio.get_files",
"tpDcc.libs.python.osplatform.is_linux",
"tpDcc.libs.python.version.VersionFile",
"tpDcc.libs.python.folder.get_files",
"tpDcc.libs.python.path.unique_path_name",
"tpDcc.libs.python.path.get_dirname",
"os.listdir",
"tpDcc.libs.python.folder.startswith",
"tpDcc.libs.python.fileio.delete_file",
"fnmatch.filter",
"tpDcc.libs.python.python.force_list",
"os.makedirs",
"tpDcc.libs.python.path.endswith",
"tpDcc.libs.python.path.exists",
"os.getcwd",
"os.path.isdir",
"tempfile.gettempdir",
"os.path.relpath",
"shutil.move",
"os.path.splitext",
"shutil.copytree",
"os.path.split",
"os.path.expanduser",
"tpDcc.libs.python.fileio.get_file_size",
"logging.getLogger"
] |
[((352, 390), 'logging.getLogger', 'logging.getLogger', (['"""tpDcc-libs-python"""'], {}), "('tpDcc-libs-python')\n", (369, 390), False, 'import logging\n'), ((1238, 1260), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', (['full_path'], {}), '(full_path)\n', (1249, 1260), False, 'from tpDcc.libs.python import path\n'), ((1376, 1412), 'tpDcc.libs.python.osplatform.get_permission', 'osplatform.get_permission', (['full_path'], {}), '(full_path)\n', (1401, 1412), False, 'from tpDcc.libs.python import name, path, osplatform\n'), ((1882, 1920), 'tpDcc.libs.python.path.get_basename', 'path.get_basename', ([], {'directory': 'directory'}), '(directory=directory)\n', (1899, 1920), False, 'from tpDcc.libs.python import path\n'), ((1981, 2018), 'tpDcc.libs.python.path.get_dirname', 'path.get_dirname', ([], {'directory': 'directory'}), '(directory=directory)\n', (1997, 2018), False, 'from tpDcc.libs.python import path\n'), ((2037, 2070), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['parent_path', 'name'], {}), '(parent_path, name)\n', (2051, 2070), False, 'from tpDcc.libs.python import path\n'), ((2167, 2191), 'tpDcc.libs.python.path.exists', 'path.exists', (['rename_path'], {}), '(rename_path)\n', (2178, 2191), False, 'from tpDcc.libs.python import path\n'), ((6337, 6372), 'tpDcc.libs.python.name.clean_file_string', 'name.clean_file_string', (['folder_name'], {}), '(folder_name)\n', (6359, 6372), False, 'from tpDcc.libs.python import name, path, osplatform\n'), ((6944, 6982), 'tpDcc.libs.python.path.get_basename', 'path.get_basename', ([], {'directory': 'directory'}), '(directory=directory)\n', (6961, 6982), False, 'from tpDcc.libs.python import path\n'), ((6998, 7035), 'tpDcc.libs.python.path.get_dirname', 'path.get_dirname', ([], {'directory': 'directory'}), '(directory=directory)\n', (7014, 7035), False, 'from tpDcc.libs.python import path\n'), ((7044, 7066), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', (['directory'], {}), '(directory)\n', (7055, 7066), False, 'from tpDcc.libs.python import path\n'), ((7666, 7695), 'tpDcc.libs.python.python.force_list', 'python.force_list', (['skip_names'], {}), '(skip_names)\n', (7683, 7695), False, 'from tpDcc.libs.python import python, path, fileio\n'), ((7739, 7757), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (7746, 7757), False, 'import os\n'), ((8324, 8346), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', (['file_path'], {}), '(file_path)\n', (8335, 8346), False, 'from tpDcc.libs.python import path\n'), ((8410, 8433), 'tpDcc.libs.python.path.is_file', 'path.is_file', (['file_path'], {}), '(file_path)\n', (8422, 8433), False, 'from tpDcc.libs.python import path\n'), ((8986, 9009), 'os.listdir', 'os.listdir', (['root_folder'], {}), '(root_folder)\n', (8996, 9009), False, 'import os\n'), ((16625, 16636), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16634, 16636), False, 'import os\n'), ((18801, 18839), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['folder_path', 'files[-1]'], {}), '(folder_path, files[-1])\n', (18815, 18839), False, 'from tpDcc.libs.python import path\n'), ((18953, 18982), 'os.path.isdir', 'os.path.isdir', (['root_directory'], {}), '(root_directory)\n', (18966, 18982), False, 'import os\n'), ((1070, 1101), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['directory', 'name'], {}), '(directory, name)\n', (1084, 1101), False, 'from tpDcc.libs.python import path\n'), ((1143, 1185), 'tpDcc.libs.python.path.unique_path_name', 'path.unique_path_name', ([], {'directory': 'full_path'}), 
'(directory=full_path)\n', (1164, 1185), False, 'from tpDcc.libs.python import path\n'), ((1305, 1327), 'os.makedirs', 'os.makedirs', (['full_path'], {}), '(full_path)\n', (1316, 1327), False, 'import os\n'), ((2114, 2158), 'tpDcc.libs.python.path.unique_path_name', 'path.unique_path_name', ([], {'directory': 'rename_path'}), '(directory=rename_path)\n', (2135, 2158), False, 'from tpDcc.libs.python import path\n'), ((2232, 2268), 'tpDcc.libs.python.osplatform.get_permission', 'osplatform.get_permission', (['directory'], {}), '(directory)\n', (2257, 2268), False, 'from tpDcc.libs.python import name, path, osplatform\n'), ((2376, 2409), 'os.rename', 'os.rename', (['directory', 'rename_path'], {}), '(directory, rename_path)\n', (2385, 2409), False, 'import os\n'), ((3217, 3249), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', ([], {'directory': 'directory'}), '(directory=directory)\n', (3228, 3249), False, 'from tpDcc.libs.python import path\n'), ((3324, 3345), 'tpDcc.libs.python.osplatform.is_linux', 'osplatform.is_linux', ([], {}), '()\n', (3343, 3345), False, 'from tpDcc.libs.python import name, path, osplatform\n'), ((5592, 5632), 'distutils.dir_util.copy_tree', 'copy_tree', (['path1', 'path2', '*args'], {}), '(path1, path2, *args, **kwargs)\n', (5601, 5632), False, 'from distutils.dir_util import copy_tree\n'), ((6265, 6296), 'tpDcc.libs.python.osplatform.get_permission', 'osplatform.get_permission', (['name'], {}), '(name)\n', (6290, 6296), False, 'from tpDcc.libs.python import name, path, osplatform\n'), ((6439, 6477), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['directory', 'folder_name'], {}), '(directory, folder_name)\n', (6453, 6477), False, 'from tpDcc.libs.python import path\n'), ((6489, 6511), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', (['full_path'], {}), '(full_path)\n', (6500, 6511), False, 'from tpDcc.libs.python import path\n'), ((6551, 6607), 'shutil.rmtree', 'shutil.rmtree', (['full_path'], {'onerror': 'delete_read_only_error'}), '(full_path, onerror=delete_read_only_error)\n', (6564, 6607), False, 'import shutil\n'), ((7307, 7329), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', (['directory'], {}), '(directory)\n', (7318, 7329), False, 'from tpDcc.libs.python import path\n'), ((7779, 7802), 'tpDcc.libs.python.path.get_basename', 'path.get_basename', (['root'], {}), '(root)\n', (7796, 7802), False, 'from tpDcc.libs.python import path\n'), ((8450, 8494), 'tpDcc.libs.python.fileio.get_file_size', 'fileio.get_file_size', (['file_path', 'round_value'], {}), '(file_path, round_value)\n', (8470, 8494), False, 'from tpDcc.libs.python import fileio\n'), ((8862, 8889), 'os.path.exists', 'os.path.exists', (['root_folder'], {}), '(root_folder)\n', (8876, 8889), False, 'import os\n'), ((10504, 10526), 'tpDcc.libs.python.path.exists', 'path.exists', (['directory'], {}), '(directory)\n', (10515, 10526), False, 'from tpDcc.libs.python import path\n'), ((10842, 10864), 'tpDcc.libs.python.folder.startswith', 'folder.startswith', (['"""."""'], {}), "('.')\n", (10859, 10864), False, 'from tpDcc.libs.python import path, fileio, folder\n'), ((10910, 10943), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['directory', 'folder'], {}), '(directory, folder)\n', (10924, 10943), False, 'from tpDcc.libs.python import path\n'), ((11688, 11712), 'tpDcc.libs.python.path.is_dir', 'path.is_dir', (['root_folder'], {}), '(root_folder)\n', (11699, 11712), False, 'from tpDcc.libs.python import path\n'), ((11904, 11924), 'os.walk', 'os.walk', (['root_folder'], {}), '(root_folder)\n', 
(11911, 11924), False, 'import os\n'), ((12184, 12207), 'os.listdir', 'os.listdir', (['root_folder'], {}), '(root_folder)\n', (12194, 12207), False, 'import os\n'), ((12719, 12740), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (12729, 12740), False, 'import os\n'), ((13427, 13450), 'os.walk', 'os.walk', (['root_directory'], {}), '(root_directory)\n', (13434, 13450), False, 'import os\n'), ((14976, 15033), 'tpDcc.libs.python.fileio.get_files', 'fileio.get_files', (['root_directory'], {'filter_text': 'filter_text'}), '(root_directory, filter_text=filter_text)\n', (14992, 15033), False, 'from tpDcc.libs.python import fileio\n'), ((15519, 15562), 'subprocess.check_call', 'subprocess.check_call', (["['open', '--', path]"], {}), "(['open', '--', path])\n", (15540, 15562), False, 'import subprocess\n'), ((16441, 16462), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (16460, 16462), False, 'import tempfile\n'), ((16842, 16861), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (16855, 16861), False, 'import os\n'), ((17894, 17921), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (17908, 17921), False, 'import os\n'), ((19039, 19062), 'os.walk', 'os.walk', (['root_directory'], {}), '(root_directory)\n', (19046, 19062), False, 'import os\n'), ((19195, 19218), 'os.walk', 'os.walk', (['root_directory'], {}), '(root_directory)\n', (19202, 19218), False, 'import os\n'), ((2440, 2455), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2450, 2455), False, 'import time\n'), ((3430, 3453), 'tpDcc.libs.python.osplatform.is_windows', 'osplatform.is_windows', ([], {}), '()\n', (3451, 3453), False, 'from tpDcc.libs.python import name, path, osplatform\n'), ((3630, 3716), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell\n =True)\n', (3646, 3716), False, 'import subprocess\n'), ((3834, 3883), 'shutil.copytree', 'shutil.copytree', (['directory', 'directory_destination'], {}), '(directory, directory_destination)\n', (3849, 3883), False, 'import shutil\n'), ((4520, 4551), 'os.path.isdir', 'os.path.isdir', (['target_directory'], {}), '(target_directory)\n', (4533, 4551), False, 'import os\n'), ((4577, 4605), 'os.listdir', 'os.listdir', (['source_directory'], {}), '(source_directory)\n', (4587, 4605), False, 'import os\n'), ((5059, 5106), 'shutil.move', 'shutil.move', (['source_directory', 'target_directory'], {}), '(source_directory, target_directory)\n', (5070, 5106), False, 'import shutil\n'), ((7101, 7128), 'tpDcc.libs.python.folder.get_files', 'folder.get_files', (['directory'], {}), '(directory)\n', (7117, 7128), False, 'from tpDcc.libs.python import path, fileio, folder\n'), ((7218, 7250), 'tpDcc.libs.python.fileio.delete_file', 'fileio.delete_file', (['f', 'directory'], {}), '(f, directory)\n', (7236, 7250), False, 'from tpDcc.libs.python import fileio\n'), ((9771, 9791), 'os.walk', 'os.walk', (['root_folder'], {}), '(root_folder)\n', (9778, 9791), False, 'import os\n'), ((10739, 10769), 'tpDcc.libs.python.version.VersionFile', 'version.VersionFile', (['directory'], {}), '(directory)\n', (10758, 10769), False, 'from tpDcc.libs.python import path, version\n'), ((10982, 11026), 'os.path.relpath', 'os.path.relpath', (['folder_path', 'base_directory'], {}), '(folder_path, base_directory)\n', (10997, 11026), False, 'import os\n'), ((11955, 11990), 'fnmatch.filter', 'fnmatch.filter', 
(['file_names', 'pattern'], {}), '(file_names, pattern)\n', (11969, 11990), False, 'import fnmatch\n'), ((12256, 12286), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['root_folder', 'f'], {}), '(root_folder, f)\n', (12270, 12286), False, 'from tpDcc.libs.python import path\n'), ((12302, 12335), 'tpDcc.libs.python.path.is_file', 'path.is_file', ([], {'file_path': 'file_path'}), '(file_path=file_path)\n', (12314, 12335), False, 'from tpDcc.libs.python import path\n'), ((13859, 13885), 'os.listdir', 'os.listdir', (['root_directory'], {}), '(root_directory)\n', (13869, 13885), False, 'import os\n'), ((14021, 14061), 'os.path.splitext', 'os.path.splitext', (['filename_and_extension'], {}), '(filename_and_extension)\n', (14037, 14061), False, 'import os\n'), ((15606, 15642), 'subprocess.Popen', 'subprocess.Popen', (["['xdg-open', path]"], {}), "(['xdg-open', path])\n", (15622, 15642), False, 'import subprocess\n'), ((16229, 16252), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (16247, 16252), False, 'import os\n'), ((17412, 17435), 'os.listdir', 'os.listdir', (['root_folder'], {}), '(root_folder)\n', (17422, 17435), False, 'import os\n'), ((18033, 18070), 'os.makedirs', 'os.makedirs', (['folder_path', 'permissions'], {}), '(folder_path, permissions)\n', (18044, 18070), False, 'import os\n'), ((2481, 2514), 'os.rename', 'os.rename', (['directory', 'rename_path'], {}), '(directory, rename_path)\n', (2490, 2514), False, 'import os\n'), ((3959, 3998), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['ignore_patterns'], {}), '(ignore_patterns)\n', (3981, 3998), False, 'import shutil\n'), ((4660, 4693), 'os.path.join', 'os.path.join', (['source_directory', 'i'], {}), '(source_directory, i)\n', (4672, 4693), False, 'import os\n'), ((4717, 4750), 'os.path.join', 'os.path.join', (['target_directory', 'i'], {}), '(target_directory, i)\n', (4729, 4750), False, 'import os\n'), ((4770, 4790), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (4784, 4790), False, 'import os\n'), ((4998, 5032), 'shutil.move', 'shutil.move', (['src', 'target_directory'], {}), '(src, target_directory)\n', (5009, 5032), False, 'import shutil\n'), ((7988, 8014), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['root', 'name'], {}), '(root, name)\n', (8002, 8014), False, 'from tpDcc.libs.python import path\n'), ((9093, 9121), 'os.path.abspath', 'os.path.abspath', (['root_folder'], {}), '(root_folder)\n', (9108, 9121), False, 'import os\n'), ((13537, 13564), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (13553, 13564), False, 'import os\n'), ((14893, 14926), 'os.path.join', 'os.path.join', (['root_directory', 'fld'], {}), '(root_directory, fld)\n', (14905, 14926), False, 'import os\n'), ((15710, 15728), 'tpDcc.libs.python.path.endswith', 'path.endswith', (['"""/"""'], {}), "('/')\n", (15723, 15728), False, 'from tpDcc.libs.python import path\n'), ((15778, 15801), 'tpDcc.libs.python.path.replace', 'path.replace', (['"""/"""', '"""\\\\"""'], {}), "('/', '\\\\')\n", (15790, 15801), False, 'from tpDcc.libs.python import path\n'), ((16162, 16185), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (16180, 16185), False, 'import os\n'), ((17347, 17377), 'os.path.join', 'os.path.join', (['root_folder', 'fld'], {}), '(root_folder, fld)\n', (17359, 17377), False, 'import os\n'), ((18129, 18169), 'os.path.join', 'os.path.join', (['folder_path', '"""placeholder"""'], {}), "(folder_path, 'placeholder')\n", (18141, 18169), 
False, 'import os\n'), ((4815, 4834), 'os.path.isdir', 'os.path.isdir', (['dest'], {}), '(dest)\n', (4828, 4834), False, 'import os\n'), ((9643, 9663), 'os.walk', 'os.walk', (['root_folder'], {}), '(root_folder)\n', (9650, 9663), False, 'import os\n'), ((15827, 15885), 'subprocess.check_call', 'subprocess.check_call', (["['explorer', new_path]"], {'shell': '(False)'}), "(['explorer', new_path], shell=False)\n", (15848, 15885), False, 'import subprocess\n'), ((18193, 18219), 'os.path.exists', 'os.path.exists', (['place_path'], {}), '(place_path)\n', (18207, 18219), False, 'import os\n'), ((4966, 4981), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (4975, 4981), False, 'import os\n'), ((9896, 9919), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['root', 'd'], {}), '(root, d)\n', (9910, 9919), False, 'from tpDcc.libs.python import path\n'), ((10042, 10065), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['root', 'd'], {}), '(root, d)\n', (10056, 10065), False, 'from tpDcc.libs.python import path\n'), ((10104, 10145), 'os.path.relpath', 'os.path.relpath', (['folder_name', 'root_folder'], {}), '(folder_name, root_folder)\n', (10119, 10145), False, 'import os\n'), ((10184, 10212), 'tpDcc.libs.python.path.clean_path', 'path.clean_path', (['folder_name'], {}), '(folder_name)\n', (10199, 10212), False, 'from tpDcc.libs.python import path\n'), ((12055, 12090), 'tpDcc.libs.python.path.join_path', 'path.join_path', (['dir_path', 'file_name'], {}), '(dir_path, file_name)\n', (12069, 12090), False, 'from tpDcc.libs.python import path\n'), ((14357, 14409), 'os.path.join', 'os.path.join', (['root_directory', 'filename_and_extension'], {}), '(root_directory, filename_and_extension)\n', (14369, 14409), False, 'import os\n'), ((2578, 2600), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2598, 2600), False, 'import traceback\n'), ((13776, 13815), 'os.path.join', 'os.path.join', (['root_directory', 'file_name'], {}), '(root_directory, file_name)\n', (13788, 13815), False, 'import os\n')]
|
import bisect
import operator
import numpy as np
import torch
from torch.utils import data
from multilayer_perceptron import *
from utils import *
def preprocess_weights(weights):
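    """
    Splits a list of MLP weight matrices into the absolute first-layer weights and the
    aggregated absolute influence of each first-hidden-layer unit on the output, obtained by
    multiplying together the absolute weight matrices of all later layers
    :param weights: list of weight matrices ordered from the input layer to the output layer
    :return: tuple of (w_input, w_later)
    """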
w_later = np.abs(weights[-1])
w_input = np.abs(weights[0])
for i in range(len(weights) - 2, 0, -1):
w_later = np.matmul(w_later, np.abs(weights[i]))
return w_input, w_later
def make_one_indexed(interaction_ranking):
return [(tuple(np.array(i) + 1), s) for i, s in interaction_ranking]
def interpret_interactions(w_input, w_later, get_main_effects=False):
interaction_strengths = {}
for i in range(w_later.shape[1]):
sorted_hweights = sorted(
enumerate(w_input[i]), key=lambda x: x[1], reverse=True
)
interaction_candidate = []
candidate_weights = []
for j in range(w_input.shape[1]):
bisect.insort(interaction_candidate, sorted_hweights[j][0])
candidate_weights.append(sorted_hweights[j][1])
if not get_main_effects and len(interaction_candidate) == 1:
continue
interaction_tup = tuple(interaction_candidate)
if interaction_tup not in interaction_strengths:
interaction_strengths[interaction_tup] = 0
interaction_strength = (min(candidate_weights)) * (np.sum(w_later[:, i]))
interaction_strengths[interaction_tup] += interaction_strength
interaction_ranking = sorted(
interaction_strengths.items(), key=operator.itemgetter(1), reverse=True
)
return interaction_ranking
def interpret_pairwise_interactions(w_input, w_later):
p = w_input.shape[1]
interaction_ranking = []
for i in range(p):
for j in range(p):
if i < j:
strength = (np.minimum(w_input[:, i], w_input[:, j]) * w_later).sum()
interaction_ranking.append(((i, j), strength))
interaction_ranking.sort(key=lambda x: x[1], reverse=True)
return interaction_ranking
def get_interactions(weights, pairwise=False, one_indexed=False):
w_input, w_later = preprocess_weights(weights)
if pairwise:
interaction_ranking = interpret_pairwise_interactions(w_input, w_later)
else:
interaction_ranking = interpret_interactions(w_input, w_later)
interaction_ranking = prune_redundant_interactions(interaction_ranking)
if one_indexed:
return make_one_indexed(interaction_ranking)
else:
return interaction_ranking
def prune_redundant_interactions(interaction_ranking, max_interactions=100):
interaction_ranking_pruned = []
current_superset_inters = []
for inter, strength in interaction_ranking:
set_inter = set(inter)
if len(interaction_ranking_pruned) >= max_interactions:
break
subset_inter_skip = False
update_superset_inters = []
for superset_inter in current_superset_inters:
if set_inter < superset_inter:
subset_inter_skip = True
break
elif not (set_inter > superset_inter):
update_superset_inters.append(superset_inter)
if subset_inter_skip:
continue
current_superset_inters = update_superset_inters
current_superset_inters.append(set_inter)
interaction_ranking_pruned.append((inter, strength))
return interaction_ranking_pruned
def detect_interactions(
Xd,
Yd,
arch=[256, 128, 64],
batch_size=100,
device=torch.device("cpu"),
seed=None,
**kwargs
):
if seed is not None:
set_seed(seed)
data_loaders = convert_to_torch_loaders(Xd, Yd, batch_size)
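    # NOTE: 'feats' below is not defined in this function's scope; it presumably refers to the
    # input feature matrix (e.g. the training split inside Xd), so that the first MLP layer
    # matches the number of input features.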
model = create_mlp([feats.shape[1]] + arch + [1]).to(device)
model, mlp_loss = train(model, data_loaders, device=device, **kwargs)
inters = get_interactions(get_weights(model))
return inters, mlp_loss
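# Illustrative usage sketch (not part of the original module). The dict structure below is an
# assumption about what convert_to_torch_loaders expects; adjust the keys to the actual utils API:
# Xd = {'train': X_train, 'val': X_val, 'test': X_test}
# Yd = {'train': y_train, 'val': y_val, 'test': y_test}
# interactions, loss = detect_interactions(Xd, Yd, arch=[256, 128, 64], batch_size=100, seed=42)
# print(interactions[:5])  # strongest detected feature interactions, e.g. [((0, 3), 1.72), ...]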
|
[
"numpy.minimum",
"numpy.abs",
"numpy.sum",
"numpy.array",
"torch.device",
"operator.itemgetter",
"bisect.insort"
] |
[((196, 215), 'numpy.abs', 'np.abs', (['weights[-1]'], {}), '(weights[-1])\n', (202, 215), True, 'import numpy as np\n'), ((230, 248), 'numpy.abs', 'np.abs', (['weights[0]'], {}), '(weights[0])\n', (236, 248), True, 'import numpy as np\n'), ((3519, 3538), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3531, 3538), False, 'import torch\n'), ((332, 350), 'numpy.abs', 'np.abs', (['weights[i]'], {}), '(weights[i])\n', (338, 350), True, 'import numpy as np\n'), ((872, 931), 'bisect.insort', 'bisect.insort', (['interaction_candidate', 'sorted_hweights[j][0]'], {}), '(interaction_candidate, sorted_hweights[j][0])\n', (885, 931), False, 'import bisect\n'), ((1509, 1531), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1528, 1531), False, 'import operator\n'), ((1333, 1354), 'numpy.sum', 'np.sum', (['w_later[:, i]'], {}), '(w_later[:, i])\n', (1339, 1354), True, 'import numpy as np\n'), ((445, 456), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (453, 456), True, 'import numpy as np\n'), ((1796, 1836), 'numpy.minimum', 'np.minimum', (['w_input[:, i]', 'w_input[:, j]'], {}), '(w_input[:, i], w_input[:, j])\n', (1806, 1836), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2019 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
from ..plugins.i_result_handler_plugin import IResultHandlerPlugin
from ..result import (
ResultCollector, TestResult, TestCompletionStatus, TestDuration
)
from ..testing import unittest
from . import _test_cases, _test_case_data
from .fixtures import ExcInfoFixture, MockDateTime
from .compat import mock
class TestTextTestResult(ExcInfoFixture, unittest.TestCase):
def test_result_collector_calls_handlers_start_stop_methods(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
case = _test_cases.TestCase('test_method')
# When
handler.reset_mock()
collector.startTestRun()
# Then
handler.start_test_run.assert_called_once_with()
self.assertFalse(handler.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
# When
handler.reset_mock()
collector.stopTestRun()
# Then
handler.stop_test_run.assert_called_once_with()
self.assertFalse(handler.called)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
# When
handler.reset_mock()
collector.startTest(case)
# Then
handler.start_test.assert_called_once_with(case)
self.assertFalse(handler.called)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.stop_test.called)
# When
handler.reset_mock()
collector.stopTest(case)
# Then
handler.stop_test.assert_called_once_with(case)
self.assertFalse(handler.called)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
def test_unicode_traceback(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
msg = '\N{GREEK SMALL LETTER PHI}'.encode('utf-8')
with self.failure_exc_info(msg) as exc_info:
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.error, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addError(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_error(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# When
with self.exc_info(RuntimeError) as exc_info:
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.error, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addError(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_failure(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
with self.failure_exc_info() as exc_info:
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.failure, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addFailure(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_success(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.success, expected_duration)
# When
with mock.patch('haas.result.datetime', new=MockDateTime(end_time)):
collector.addSuccess(case)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertTrue(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_skip(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.skipped, expected_duration,
message='reason')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(end_time)):
collector.addSkip(case, 'reason')
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertTrue(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_expected_fail(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
with self.exc_info(RuntimeError) as exc_info:
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.expected_failure, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addExpectedFailure(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertTrue(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_unexpected_success(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.unexpected_success, expected_duration)
# When
with mock.patch('haas.result.datetime', new=MockDateTime(end_time)):
collector.addUnexpectedSuccess(case)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_should_stop(self):
# Given
collector = ResultCollector()
# Then
self.assertFalse(collector.shouldStop)
# When
collector.stop()
# Then
self.assertTrue(collector.shouldStop)
def test_multiple_errors_from_one_test(self):
# Given
collector = ResultCollector()
case = _test_case_data.TestWithTwoErrors('test_with_two_errors')
start_time = datetime(2016, 4, 12, 8, 17, 32)
test_end_time = datetime(2016, 4, 12, 8, 17, 38)
tear_down_end_time = datetime(2016, 4, 12, 8, 17, 39)
# When
with mock.patch(
'haas.result.datetime',
new=MockDateTime(
[start_time, test_end_time, tear_down_end_time])):
case.run(collector)
# Then
self.assertEqual(len(collector.errors), 2)
|
[
"datetime.timedelta",
"datetime.datetime"
] |
[((2590, 2623), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (2598, 2623), False, 'from datetime import datetime, timedelta\n'), ((2643, 2664), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (2652, 2664), False, 'from datetime import datetime, timedelta\n'), ((4100, 4133), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (4108, 4133), False, 'from datetime import datetime, timedelta\n'), ((4153, 4174), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (4162, 4174), False, 'from datetime import datetime, timedelta\n'), ((5574, 5607), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (5582, 5607), False, 'from datetime import datetime, timedelta\n'), ((5627, 5648), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (5636, 5648), False, 'from datetime import datetime, timedelta\n'), ((7029, 7062), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (7037, 7062), False, 'from datetime import datetime, timedelta\n'), ((7082, 7103), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (7091, 7103), False, 'from datetime import datetime, timedelta\n'), ((8343, 8376), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (8351, 8376), False, 'from datetime import datetime, timedelta\n'), ((8396, 8417), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (8405, 8417), False, 'from datetime import datetime, timedelta\n'), ((9703, 9736), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (9711, 9736), False, 'from datetime import datetime, timedelta\n'), ((9756, 9777), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (9765, 9777), False, 'from datetime import datetime, timedelta\n'), ((11189, 11222), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(23)', '(8)', '(14)', '(12)'], {}), '(2015, 12, 23, 8, 14, 12)\n', (11197, 11222), False, 'from datetime import datetime, timedelta\n'), ((11242, 11263), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (11251, 11263), False, 'from datetime import datetime, timedelta\n'), ((12758, 12790), 'datetime.datetime', 'datetime', (['(2016)', '(4)', '(12)', '(8)', '(17)', '(32)'], {}), '(2016, 4, 12, 8, 17, 32)\n', (12766, 12790), False, 'from datetime import datetime, timedelta\n'), ((12815, 12847), 'datetime.datetime', 'datetime', (['(2016)', '(4)', '(12)', '(8)', '(17)', '(38)'], {}), '(2016, 4, 12, 8, 17, 38)\n', (12823, 12847), False, 'from datetime import datetime, timedelta\n'), ((12877, 12909), 'datetime.datetime', 'datetime', (['(2016)', '(4)', '(12)', '(8)', '(17)', '(39)'], {}), '(2016, 4, 12, 8, 17, 39)\n', (12885, 12909), False, 'from datetime import datetime, timedelta\n')]
|
from django.db import models
from account.models import Country
from django.contrib import admin
grad_streams_list = [
'Engineering',
'Law',
'Medicine',
'Business',
]
grad_streams = (
('Engineering', 'Engineering'),
('Law', 'Law'),
('Medicine', 'Medicine'),
('Business', 'Business'),
)
class GRE(models.Model):
verbal = models.IntegerField(default=None, null=True, blank=True)
quant = models.IntegerField(default=None, null=True, blank=True)
awa = models.FloatField(default=None, null=True, blank=True)
def __str__(self):
return str(self.verbal)
class MCAT(models.Model):
old_total = models.IntegerField(default=None, null=True, blank=True)
new_total = models.IntegerField(default=None, null=True, blank=True)
chemical_physical = models.IntegerField(default=None, null=True, blank=True)
critical_analysis = models.IntegerField(default=None, null=True, blank=True)
biologic_biochemical = models.IntegerField(default=None, null=True, blank=True)
psycho_social_biological = models.IntegerField(default=None, null=True, blank=True)
def __str__(self):
return str(self.new_total)
class University(models.Model):
name = models.TextField(default=None)
info_link = models.TextField(default=None, null=True)
rank = models.IntegerField(default=None, null=True, blank=True)
country = models.ForeignKey(Country, on_delete=models.CASCADE)
total_students = models.IntegerField(default=None, null=True, blank=True)
total_int_students = models.IntegerField(default=None, null=True, blank=True)
address = models.TextField(default=None, null=True, blank=True)
website = models.TextField(default=None, null=True, blank=True, max_length=500)
schools = models.TextField(default=None, null=True, blank=True)
uni_type = models.TextField(default=None, null=True, blank=True)
grad_school_link = models.TextField(default=None, null=True, blank=True, max_length=500)
undergrad_link = models.TextField(default=None, null=True, blank=True, max_length=500)
business_link = models.TextField(default=None, null=True, blank=True, max_length=500)
med_link = models.TextField(default=None, null=True, blank=True, max_length=500)
law_link = models.TextField(default=None, null=True, blank=True, max_length=500)
engg_link = models.TextField(default=None, null=True, blank=True, max_length=500)
slug = models.SlugField(default=None, null=True, blank=True, max_length=500)
logo = models.TextField(default=None, null=True, blank=True, max_length=500)
def __str__(self):
return self.name
class UniversityAdmin(admin.ModelAdmin):
search_fields = ('name',)
ordering = ('rank',)
class BusinessGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE)
enrollment = models.IntegerField(default=None, null=True, blank=True)
international = models.FloatField(default=None, null=True, blank=True)
male = models.FloatField(default=None, null=True, blank=True)
female = models.FloatField(default=None, null=True, blank=True)
acceptance_rate_masters = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_phd = models.FloatField(default=None, null=True, blank=True) #
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
tuition = models.FloatField(default=None, null=True, blank=True)
us_deadline = models.DateTimeField(default=None, null=True, blank=True)
int_deadline = models.DateTimeField(default=None, null=True, blank=True)
rolling = models.BooleanField(default=False)
gpa = models.FloatField(default=None, null=True, blank=True)
min_toefl_score = models.IntegerField(default=None, null=True, blank=True)
mean_toefl_score = models.IntegerField(default=None, null=True, blank=True)
min_ielts_score = models.FloatField(default=None, null=True, blank=True)
fin_aid_director_name = models.TextField(default=None, null=True, blank=True)
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True)
fellowships = models.IntegerField(default=None, null=True, blank=True)
teaching_assistantships = models.IntegerField(default=None, null=True, blank=True)
research_assistantships = models.IntegerField(default=None, null=True, blank=True)
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True)
# unique to business
employed = models.FloatField(default=None, null=True, blank=True)
employed_3_months = models.FloatField(default=None, null=True, blank=True)
avg_work_ex_months = models.IntegerField(default=None, null=True, blank=True)
gmat = models.IntegerField(default=None, null=True, blank=True)
gre = models.OneToOneField(GRE, on_delete=models.CASCADE) #
avg_salary = models.IntegerField(default=None, null=True, blank=True)
def __str__(self):
return self.university.name
class BusinessGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
class EngineeringGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE) #
enrollment = models.IntegerField(default=None, null=True, blank=True) #
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
international = models.FloatField(default=None, null=True, blank=True) #
male = models.FloatField(default=None, null=True, blank=True) #
female = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_masters = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_phd = models.FloatField(default=None, null=True, blank=True) #
tuition = models.FloatField(default=None, null=True, blank=True) #
us_deadline = models.DateTimeField(default=None, null=True, blank=True) #
int_deadline = models.DateTimeField(default=None, null=True, blank=True) #
rolling = models.BooleanField(default=False) #
gpa = models.FloatField(default=None, null=True, blank=True) #
min_toefl_score = models.IntegerField(default=None, null=True, blank=True) #
mean_toefl_score = models.IntegerField(default=None, null=True, blank=True) #
min_ielts_score = models.FloatField(default=None, null=True, blank=True) #
fin_aid_director_name = models.TextField(default=None, null=True, blank=True) #
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True) #
fellowships = models.IntegerField(default=None, null=True, blank=True)
teaching_assistantships = models.IntegerField(default=None, null=True, blank=True) #
research_assistantships = models.IntegerField(default=None, null=True, blank=True) #
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True) #
# unique to engineering
gre = models.OneToOneField(GRE, on_delete=models.CASCADE, null=True, blank=True) #
def __str__(self):
return self.university.name
class EngineeringGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
class MedicineGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE)
enrollment = models.IntegerField(default=None, null=True, blank=True)
international = models.FloatField(default=None, null=True, blank=True)
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
acceptance_rate_masters = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_phd = models.FloatField(default=None, null=True, blank=True) #
male = models.FloatField(default=None, null=True, blank=True)
female = models.FloatField(default=None, null=True, blank=True)
tuition = models.FloatField(default=None, null=True, blank=True)
us_deadline = models.DateTimeField(default=None, null=True, blank=True)
int_deadline = models.DateTimeField(default=None, null=True, blank=True)
rolling = models.BooleanField(default=False)
gpa = models.FloatField(default=None, null=True, blank=True) #
fin_aid_director_name = models.TextField(default=None, null=True, blank=True)
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True)
students_receiving_aid = models.FloatField(default=None, null=True, blank=True)
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True)
# unique to medicine
mcat = models.OneToOneField(MCAT, on_delete=models.CASCADE)
def __str__(self):
return self.university.name
class MedicineGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
class LawGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE)
enrollment = models.IntegerField(default=None, null=True, blank=True)
international = models.FloatField(default=None, null=True, blank=True)
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
male = models.FloatField(default=None, null=True, blank=True)
female = models.FloatField(default=None, null=True, blank=True)
acceptance_rate = models.FloatField(default=None, null=True, blank=True)
tuition = models.FloatField(default=None, null=True, blank=True)
us_deadline = models.DateTimeField(default=None, null=True, blank=True)
int_deadline = models.DateTimeField(default=None, null=True, blank=True)
rolling = models.BooleanField(default=False)
int_rolling = models.BooleanField(default=False)
employed = models.FloatField(default=None, null=True, blank=True)
fin_aid_director_name = models.TextField(default=None, null=True, blank=True)
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True)
students_receiving_aid = models.FloatField(default=None, null=True, blank=True)
gpa = models.FloatField(default=None, null=True, blank=True) #
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True)
# unique to law
# look for median lsat
employed = models.FloatField(default=None, null=True, blank=True)
bar_passage_rate = models.FloatField(default=None, null=True, blank=True)
median_grant = models.IntegerField(default=None, null=True, blank=True)
lsat_score = models.IntegerField(default=None, null=True, blank=True)
median_public_salary = models.IntegerField(default=None, null=True, blank=True)
median_private_salary = models.IntegerField(default=None, null=True, blank=True)
def __str__(self):
return self.university.name
class LawGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.SlugField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((362, 418), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (381, 418), False, 'from django.db import models\n'), ((433, 489), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (452, 489), False, 'from django.db import models\n'), ((504, 558), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (521, 558), False, 'from django.db import models\n'), ((677, 733), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (696, 733), False, 'from django.db import models\n'), ((768, 824), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (787, 824), False, 'from django.db import models\n'), ((859, 915), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (878, 915), False, 'from django.db import models\n'), ((950, 1006), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (969, 1006), False, 'from django.db import models\n'), ((1041, 1097), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1060, 1097), False, 'from django.db import models\n'), ((1132, 1188), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1151, 1188), False, 'from django.db import models\n'), ((1312, 1342), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None'}), '(default=None)\n', (1328, 1342), False, 'from django.db import models\n'), ((1373, 1414), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (1389, 1414), False, 'from django.db import models\n'), ((1445, 1501), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1464, 1501), False, 'from django.db import models\n'), ((1532, 1584), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Country'], {'on_delete': 'models.CASCADE'}), '(Country, on_delete=models.CASCADE)\n', (1549, 1584), False, 'from django.db import models\n'), ((1615, 1671), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1634, 1671), False, 'from django.db import models\n'), ((1702, 1758), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1721, 1758), False, 'from django.db import models\n'), ((1789, 1842), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1805, 1842), False, 'from 
django.db import models\n'), ((1873, 1942), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (1889, 1942), False, 'from django.db import models\n'), ((1973, 2026), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (1989, 2026), False, 'from django.db import models\n'), ((2057, 2110), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (2073, 2110), False, 'from django.db import models\n'), ((2141, 2210), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2157, 2210), False, 'from django.db import models\n'), ((2241, 2310), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2257, 2310), False, 'from django.db import models\n'), ((2341, 2410), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2357, 2410), False, 'from django.db import models\n'), ((2441, 2510), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2457, 2510), False, 'from django.db import models\n'), ((2541, 2610), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2557, 2610), False, 'from django.db import models\n'), ((2641, 2710), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2657, 2710), False, 'from django.db import models\n'), ((2741, 2810), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2757, 2810), False, 'from django.db import models\n'), ((2841, 2910), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)', 'max_length': '(500)'}), '(default=None, null=True, blank=True, max_length=500)\n', (2857, 2910), False, 'from django.db import models\n'), ((3124, 3182), 'django.db.models.OneToOneField', 'models.OneToOneField', (['University'], {'on_delete': 'models.CASCADE'}), '(University, on_delete=models.CASCADE)\n', (3144, 3182), False, 'from django.db import models\n'), ((3213, 3269), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3232, 3269), False, 'from django.db import models\n'), ((3300, 3354), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3317, 3354), False, 'from 
django.db import models\n'), ((3385, 3439), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3402, 3439), False, 'from django.db import models\n'), ((3470, 3524), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3487, 3524), False, 'from django.db import models\n'), ((3555, 3609), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3572, 3609), False, 'from django.db import models\n'), ((3643, 3697), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3660, 3697), False, 'from django.db import models\n'), ((3731, 3787), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3750, 3787), False, 'from django.db import models\n'), ((3821, 3877), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3840, 3877), False, 'from django.db import models\n'), ((3911, 3965), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3928, 3965), False, 'from django.db import models\n'), ((3996, 4053), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4016, 4053), False, 'from django.db import models\n'), ((4084, 4141), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4104, 4141), False, 'from django.db import models\n'), ((4172, 4206), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4191, 4206), False, 'from django.db import models\n'), ((4237, 4291), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4254, 4291), False, 'from django.db import models\n'), ((4322, 4378), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4341, 4378), False, 'from django.db import models\n'), ((4409, 4465), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4428, 4465), False, 'from django.db import models\n'), ((4496, 4550), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4513, 4550), False, 'from django.db import models\n'), ((4581, 4634), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4597, 4634), False, 'from django.db import models\n'), ((4665, 4718), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': 
'(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4681, 4718), False, 'from django.db import models\n'), ((4749, 4805), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4768, 4805), False, 'from django.db import models\n'), ((4836, 4892), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4855, 4892), False, 'from django.db import models\n'), ((4923, 4979), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4942, 4979), False, 'from django.db import models\n'), ((5040, 5096), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (5059, 5096), False, 'from django.db import models\n'), ((5153, 5207), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (5170, 5207), False, 'from django.db import models\n'), ((5238, 5292), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (5255, 5292), False, 'from django.db import models\n'), ((5323, 5379), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (5342, 5379), False, 'from django.db import models\n'), ((5410, 5466), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (5429, 5466), False, 'from django.db import models\n'), ((5497, 5548), 'django.db.models.OneToOneField', 'models.OneToOneField', (['GRE'], {'on_delete': 'models.CASCADE'}), '(GRE, on_delete=models.CASCADE)\n', (5517, 5548), False, 'from django.db import models\n'), ((5582, 5638), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (5601, 5638), False, 'from django.db import models\n'), ((5892, 5950), 'django.db.models.OneToOneField', 'models.OneToOneField', (['University'], {'on_delete': 'models.CASCADE'}), '(University, on_delete=models.CASCADE)\n', (5912, 5950), False, 'from django.db import models\n'), ((5984, 6040), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6003, 6040), False, 'from django.db import models\n'), ((6074, 6130), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6093, 6130), False, 'from django.db import models\n'), ((6164, 6220), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6183, 6220), False, 'from django.db import models\n'), ((6254, 6308), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6271, 6308), 
False, 'from django.db import models\n'), ((6342, 6396), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6359, 6396), False, 'from django.db import models\n'), ((6430, 6484), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6447, 6484), False, 'from django.db import models\n'), ((6518, 6572), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6535, 6572), False, 'from django.db import models\n'), ((6606, 6660), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6623, 6660), False, 'from django.db import models\n'), ((6694, 6748), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6711, 6748), False, 'from django.db import models\n'), ((6782, 6839), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6802, 6839), False, 'from django.db import models\n'), ((6873, 6930), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (6893, 6930), False, 'from django.db import models\n'), ((6964, 6998), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6983, 6998), False, 'from django.db import models\n'), ((7032, 7086), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7049, 7086), False, 'from django.db import models\n'), ((7120, 7176), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7139, 7176), False, 'from django.db import models\n'), ((7210, 7266), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7229, 7266), False, 'from django.db import models\n'), ((7300, 7354), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7317, 7354), False, 'from django.db import models\n'), ((7388, 7441), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7404, 7441), False, 'from django.db import models\n'), ((7475, 7528), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7491, 7528), False, 'from django.db import models\n'), ((7562, 7618), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7581, 7618), False, 'from django.db import models\n'), ((7649, 7705), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 
'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7668, 7705), False, 'from django.db import models\n'), ((7739, 7795), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7758, 7795), False, 'from django.db import models\n'), ((7859, 7915), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (7878, 7915), False, 'from django.db import models\n'), ((7978, 8052), 'django.db.models.OneToOneField', 'models.OneToOneField', (['GRE'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'blank': '(True)'}), '(GRE, on_delete=models.CASCADE, null=True, blank=True)\n', (7998, 8052), False, 'from django.db import models\n'), ((8309, 8367), 'django.db.models.OneToOneField', 'models.OneToOneField', (['University'], {'on_delete': 'models.CASCADE'}), '(University, on_delete=models.CASCADE)\n', (8329, 8367), False, 'from django.db import models\n'), ((8398, 8454), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8417, 8454), False, 'from django.db import models\n'), ((8485, 8539), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8502, 8539), False, 'from django.db import models\n'), ((8570, 8626), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8589, 8626), False, 'from django.db import models\n'), ((8660, 8716), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8679, 8716), False, 'from django.db import models\n'), ((8750, 8804), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8767, 8804), False, 'from django.db import models\n'), ((8838, 8892), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8855, 8892), False, 'from django.db import models\n'), ((8926, 8980), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (8943, 8980), False, 'from django.db import models\n'), ((9011, 9065), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9028, 9065), False, 'from django.db import models\n'), ((9096, 9150), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9113, 9150), False, 'from django.db import models\n'), ((9181, 9238), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9201, 9238), False, 'from django.db import models\n'), ((9269, 9326), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': 
'(True)'}), '(default=None, null=True, blank=True)\n', (9289, 9326), False, 'from django.db import models\n'), ((9357, 9391), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (9376, 9391), False, 'from django.db import models\n'), ((9422, 9476), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9439, 9476), False, 'from django.db import models\n'), ((9510, 9563), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9526, 9563), False, 'from django.db import models\n'), ((9594, 9647), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9610, 9647), False, 'from django.db import models\n'), ((9678, 9732), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9695, 9732), False, 'from django.db import models\n'), ((9794, 9850), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (9813, 9850), False, 'from django.db import models\n'), ((9907, 9959), 'django.db.models.OneToOneField', 'models.OneToOneField', (['MCAT'], {'on_delete': 'models.CASCADE'}), '(MCAT, on_delete=models.CASCADE)\n', (9927, 9959), False, 'from django.db import models\n'), ((10205, 10263), 'django.db.models.OneToOneField', 'models.OneToOneField', (['University'], {'on_delete': 'models.CASCADE'}), '(University, on_delete=models.CASCADE)\n', (10225, 10263), False, 'from django.db import models\n'), ((10294, 10350), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10313, 10350), False, 'from django.db import models\n'), ((10381, 10435), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10398, 10435), False, 'from django.db import models\n'), ((10466, 10522), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10485, 10522), False, 'from django.db import models\n'), ((10556, 10612), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10575, 10612), False, 'from django.db import models\n'), ((10646, 10700), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10663, 10700), False, 'from django.db import models\n'), ((10731, 10785), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10748, 10785), False, 'from django.db import models\n'), ((10816, 10870), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10833, 10870), False, 'from django.db import models\n'), ((10901, 10955), 
'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (10918, 10955), False, 'from django.db import models\n'), ((10986, 11043), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11006, 11043), False, 'from django.db import models\n'), ((11074, 11131), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11094, 11131), False, 'from django.db import models\n'), ((11162, 11196), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (11181, 11196), False, 'from django.db import models\n'), ((11227, 11261), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (11246, 11261), False, 'from django.db import models\n'), ((11292, 11346), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11309, 11346), False, 'from django.db import models\n'), ((11377, 11430), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11393, 11430), False, 'from django.db import models\n'), ((11461, 11514), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11477, 11514), False, 'from django.db import models\n'), ((11545, 11599), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11562, 11599), False, 'from django.db import models\n'), ((11630, 11684), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11647, 11684), False, 'from django.db import models\n'), ((11749, 11805), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11768, 11805), False, 'from django.db import models\n'), ((11884, 11938), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11901, 11938), False, 'from django.db import models\n'), ((11969, 12023), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (11986, 12023), False, 'from django.db import models\n'), ((12054, 12110), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (12073, 12110), False, 'from django.db import models\n'), ((12141, 12197), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (12160, 12197), False, 'from django.db import models\n'), ((12228, 12284), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), 
'(default=None, null=True, blank=True)\n', (12247, 12284), False, 'from django.db import models\n'), ((12315, 12371), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (12334, 12371), False, 'from django.db import models\n')]
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation
from keras.layers.core import Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.utils import to_categorical
trainFile = pd.read_csv('./dataset/train.csv').drop(columns="datasetId")
testFile = pd.read_csv('./dataset/test.csv').drop(columns="datasetId")
# train
train_samples = trainFile.drop(columns='condition').to_numpy()
train_labels = trainFile['condition'].to_numpy()
# test
test_samples = testFile.drop(columns='condition').to_numpy()
test_labels = testFile['condition'].to_numpy()
# normalizing features
scaler = MinMaxScaler(feature_range=(0, 1))
train_samples = scaler.fit_transform(train_samples)
test_samples = scaler.transform(test_samples)
# one-hot-encoding labels
one_hot_encoder = OneHotEncoder(categories='auto')
train_labels = one_hot_encoder.fit_transform(train_labels.reshape(-1, 1)).toarray()
test_labels = one_hot_encoder.transform(test_labels.reshape(-1, 1)).toarray()
# build the model
model = Sequential([
Dense(34, input_shape=[34, ], activation='relu'),
Dense(20, activation='relu'),
Dense(10, activation='relu'),
Dense(3, activation='softmax')
])
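# simple fully-connected classifier: 34 input features -> 20 -> 10 hidden units (ReLU) -> 3-class softmax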
print(model.summary())
model.compile(Adam(lr=.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_samples, train_labels, validation_split=0.1, batch_size=10, epochs=10, shuffle=True, verbose=2)
model.save('model.h5')
predictions = model.predict(test_samples)
print(predictions)
np.savetxt('predictions.csv', predictions, delimiter=",")
|
[
"keras.layers.core.Dense",
"pandas.read_csv",
"numpy.savetxt",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.OneHotEncoder",
"keras.optimizers.Adam"
] |
[((826, 860), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (838, 860), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1008, 1040), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (1021, 1040), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1746, 1804), 'numpy.savetxt', 'np.savetxt', (['"""predictions.csv"""', 'test_samples'], {'delimiter': '""","""'}), "('predictions.csv', test_samples, delimiter=',')\n", (1756, 1804), True, 'import numpy as np\n'), ((1446, 1461), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (1450, 1461), False, 'from keras.optimizers import Adam\n'), ((424, 458), 'pandas.read_csv', 'pd.read_csv', (['"""./dataset/train.csv"""'], {}), "('./dataset/train.csv')\n", (435, 458), True, 'import pandas as pd\n'), ((496, 529), 'pandas.read_csv', 'pd.read_csv', (['"""./dataset/test.csv"""'], {}), "('./dataset/test.csv')\n", (507, 529), True, 'import pandas as pd\n'), ((1251, 1297), 'keras.layers.core.Dense', 'Dense', (['(34)'], {'input_shape': '[34]', 'activation': '"""relu"""'}), "(34, input_shape=[34], activation='relu')\n", (1256, 1297), False, 'from keras.layers.core import Dense\n'), ((1305, 1333), 'keras.layers.core.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (1310, 1333), False, 'from keras.layers.core import Dense\n'), ((1339, 1367), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (1344, 1367), False, 'from keras.layers.core import Dense\n'), ((1373, 1403), 'keras.layers.core.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (1378, 1403), False, 'from keras.layers.core import Dense\n')]
|
###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## <NAME> ##
## <NAME> ##
## ##
###################################################
import os,sys
import xml.etree.cElementTree as ET
import xml.dom.minidom as minidom
import random
def write_mesmer_input(species,barriers,products):
root = ET.Element( 'me:mesmer',{'xmlns':'http://www.xml-cml.org/schema',
'xmlns:me':'http://www.chem.leeds.ac.uk/mesmer',
'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-instance'})
title = ET.SubElement(root,'me:title').text = 'species.chemid'
mollist = ET.SubElement(root,'moleculeList')
#write the initial species
atom = ['C','C','C','H','H','H','H','H','H']
natom = len(atom)
rad = [0 for ai in atom]
charge = 0
addMolecule(mollist, species, atom, natom, rad, charge)
#Todo: write the products and tss to the mollist
reaclist = ET.SubElement(root,'reactionList')
#write the reactions
for index, instance in enumerate(species.reac_inst):
addReaction(reaclist, species, index, instance)
st = ET.tostring(root,'utf-8')
st = minidom.parseString(st)
fout = open('test.xml','w')
fout.write(st.toprettyxml(indent = ' '))
fout.close()
#write st.toprettyxml(indent = ' ')
#tree.write('test.xml', encoding='utf-8',xml_declaration=True)
def addReaction(reaclist, species, index, instance):
a = 1
def addMolecule(mollist,mol, atom, natom, rad, charge):
geom = []
for i,at in enumerate(atom):
geom.append([random.uniform(-3.,3.), random.uniform(-3.,3.), random.uniform(-3.,3.)])
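    # placeholder structure data: random 3D coordinates above and, below, a hard-coded
    # 9x9 bond-order matrix describing a propene-like C3H6 skeleton (C=C-C carrying 2/1/3 hydrogens)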
bond = [
[0,2,0,1,1,0,0,0,0],
[2,0,1,0,0,1,0,0,0],
[0,1,0,0,0,0,1,1,1],
[1,0,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0]
]
molecule = ET.SubElement(mollist, 'molecule', {'id':'species.chemid','spinMultiplicity':'{}'.format(sum(rad))})
atomarray = ET.SubElement(molecule, 'atomArray')
for i,at in enumerate(atom):
args = {'id':'a{}'.format(i+1)}
args['elementType'] = at
args['x3'] = '{:.8f}'.format(geom[i][0])
args['y3'] = '{:.8f}'.format(geom[i][1])
args['z3'] = '{:.8f}'.format(geom[i][2])
at = ET.SubElement(atomarray, 'atom', args)
bond_id = 1
bondarray = ET.SubElement(molecule, 'bondArray')
for i in range(len(atom)-1):
for j in range(i+1,len(atom)):
if bond[i][j] > 0:
args = {'id':'b{}'.format(bond_id)}
args['atomRefs2']="a{} a{}".format(i+1,j+1)
args['order']="{}".format(bond[i][j])
b = ET.SubElement(bondarray,'bond',args)
bond_id += 1
propertylist = ET.SubElement(molecule, 'propertyList')
#add the zpe
property = ET.SubElement(propertylist, 'property', {'dictRef':'me:ZPE'})
scalar = ET.SubElement(property, 'scalar', {'units':'cm-1'}).text = str(15.5)
def indent(elem, level=0):
i = "\n" + level*" "
j = "\n" + (level-1)*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
indent(subelem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem
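# note: the indent() helper above is an alternative pretty-printer that is never called;
# write_mesmer_input() pretty-prints via minidom's toprettyxml() instead.
# quick smoke test with placeholder species/barriers/products: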
species = 'a'
barriers = ['b1','b2']
products = [['p1','p2'],['p3']]
write_mesmer_input(species,barriers,products)
|
[
"xml.dom.minidom.parseString",
"random.uniform",
"xml.etree.cElementTree.tostring",
"xml.etree.cElementTree.Element",
"xml.etree.cElementTree.SubElement"
] |
[((1131, 1314), 'xml.etree.cElementTree.Element', 'ET.Element', (['"""me:mesmer"""', "{'xmlns': 'http://www.xml-cml.org/schema', 'xmlns:me':\n 'http://www.chem.leeds.ac.uk/mesmer', 'xmlns:xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'}"], {}), "('me:mesmer', {'xmlns': 'http://www.xml-cml.org/schema',\n 'xmlns:me': 'http://www.chem.leeds.ac.uk/mesmer', 'xmlns:xsi':\n 'http://www.w3.org/2001/XMLSchema-instance'})\n", (1141, 1314), True, 'import xml.etree.cElementTree as ET\n'), ((1439, 1474), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['root', '"""moleculeList"""'], {}), "(root, 'moleculeList')\n", (1452, 1474), True, 'import xml.etree.cElementTree as ET\n'), ((1763, 1798), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['root', '"""reactionList"""'], {}), "(root, 'reactionList')\n", (1776, 1798), True, 'import xml.etree.cElementTree as ET\n'), ((1950, 1976), 'xml.etree.cElementTree.tostring', 'ET.tostring', (['root', '"""utf-8"""'], {}), "(root, 'utf-8')\n", (1961, 1976), True, 'import xml.etree.cElementTree as ET\n'), ((1985, 2008), 'xml.dom.minidom.parseString', 'minidom.parseString', (['st'], {}), '(st)\n', (2004, 2008), True, 'import xml.dom.minidom as minidom\n'), ((2847, 2883), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['molecule', '"""atomArray"""'], {}), "(molecule, 'atomArray')\n", (2860, 2883), True, 'import xml.etree.cElementTree as ET\n'), ((3221, 3257), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['molecule', '"""bondArray"""'], {}), "(molecule, 'bondArray')\n", (3234, 3257), True, 'import xml.etree.cElementTree as ET\n'), ((3632, 3671), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['molecule', '"""propertyList"""'], {}), "(molecule, 'propertyList')\n", (3645, 3671), True, 'import xml.etree.cElementTree as ET\n'), ((3709, 3771), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['propertylist', '"""property"""', "{'dictRef': 'me:ZPE'}"], {}), "(propertylist, 'property', {'dictRef': 'me:ZPE'})\n", (3722, 3771), True, 'import xml.etree.cElementTree as ET\n'), ((1370, 1401), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['root', '"""me:title"""'], {}), "(root, 'me:title')\n", (1383, 1401), True, 'import xml.etree.cElementTree as ET\n'), ((3150, 3188), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['atomarray', '"""atom"""', 'args'], {}), "(atomarray, 'atom', args)\n", (3163, 3188), True, 'import xml.etree.cElementTree as ET\n'), ((3784, 3836), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['property', '"""scalar"""', "{'units': 'cm-1'}"], {}), "(property, 'scalar', {'units': 'cm-1'})\n", (3797, 3836), True, 'import xml.etree.cElementTree as ET\n'), ((2399, 2424), 'random.uniform', 'random.uniform', (['(-3.0)', '(3.0)'], {}), '(-3.0, 3.0)\n', (2413, 2424), False, 'import random\n'), ((2423, 2448), 'random.uniform', 'random.uniform', (['(-3.0)', '(3.0)'], {}), '(-3.0, 3.0)\n', (2437, 2448), False, 'import random\n'), ((2447, 2472), 'random.uniform', 'random.uniform', (['(-3.0)', '(3.0)'], {}), '(-3.0, 3.0)\n', (2461, 2472), False, 'import random\n'), ((3547, 3585), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['bondarray', '"""bond"""', 'args'], {}), "(bondarray, 'bond', args)\n", (3560, 3585), True, 'import xml.etree.cElementTree as ET\n')]
|
r'''This dataloader is an attempt to make a master DL that provides 2 augmented versions
of a sparse clip (covering a minimum of 64 frames) and 2 augmented versions of 4 dense clips
(each covering a minimum 16-frame temporal span)'''
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import config as cfg
import random
import pickle
import parameters as params
import json
import math
import cv2
# from tqdm import tqdm
import time
import torchvision.transforms as trans
# from decord import VideoReader
class ss_dataset_gen1(Dataset):
def __init__(self, shuffle = True, data_percentage = 1.0, split = 1):
#####################
# self.all_paths = open(os.path.join(cfg.path_folder,'train_vids.txt'),'r').read().splitlines()
if split == 1:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist01.txt'),'r').read().splitlines()
elif split ==2:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist02.txt'),'r').read().splitlines()
elif split ==3:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist03.txt'),'r').read().splitlines()
else:
print(f'Invalid split input: {split}')
#####################
self.shuffle = shuffle
if self.shuffle:
random.shuffle(self.all_paths)
self.data_percentage = data_percentage
self.data_limit = int(len(self.all_paths)*self.data_percentage)
self.data = self.all_paths[0: self.data_limit]
self.PIL = trans.ToPILImage()
self.TENSOR = trans.ToTensor()
self.erase_size = 19
def __len__(self):
return len(self.data)
def __getitem__(self,index):
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path = self.process_data(index)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path
def process_data(self, idx):
vid_path = cfg.path_folder + '/UCF-101/' + self.data[idx].split(' ')[0]
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense = self.build_clip(vid_path)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path
def build_clip(self, vid_path):
try:
cap = cv2.VideoCapture(vid_path)
cap.set(1, 0)
frame_count = cap.get(7)
if frame_count <= 56:
# print(f'Video {vid_path} has insufficient frames')
return None, None, None, None, None, None, None, None, None, None, None, None
############################# frame_list maker start here#################################
min_temporal_span_sparse = params.num_frames*params.sr_ratio
if frame_count > min_temporal_span_sparse:
start_frame = np.random.randint(0,frame_count-min_temporal_span_sparse)
#Dynamic skip rate experiment
# skip_max = int((frame_count - start_frame)/params.num_frames)
# # here 4 is the skip rate ratio = 4 chunks
# if skip_max >= 16:
# sr_sparse = np.random.choice([4,8,12,16])
# elif (skip_max<16) and (skip_max>=12):
# sr_sparse = np.random.choice([4,8,12])
# elif (skip_max<12) and (skip_max>=8):
# sr_sparse = np.random.choice([4,8])
# else:
sr_sparse = 4
else:
start_frame = 0
sr_sparse = 4
sr_dense = int(sr_sparse/4)
frames_sparse = [start_frame] + [start_frame + i*sr_sparse for i in range(1,params.num_frames)]
frames_dense = [[frames_sparse[j*4]]+[frames_sparse[j*4] + i*sr_dense for i in range(1,params.num_frames)] for j in range(4)]
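            # worked example (assuming params.num_frames == 16, consistent with the 64/16-frame
            # spans described in the module docstring), with sr_sparse == 4 and start_frame == s:
            #   frames_sparse   = [s, s+4, s+8, ..., s+60]         -> 16 frames spread over a ~64-frame window
            #   frames_dense[j] = [s+16j, s+16j+1, ..., s+16j+15]  -> 16 consecutive frames per dense clip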
################################ frame list maker finishes here ###########################
################################ actual clip builder starts here ##########################
sparse_clip = []
dense_clip0 = []
dense_clip1 = []
dense_clip2 = []
dense_clip3 = []
a_sparse_clip = []
a_dense_clip0 = []
a_dense_clip1 = []
a_dense_clip2 = []
a_dense_clip3 = []
list_sparse = []
list_dense = [[] for i in range(4)]
count = -1
random_array = np.random.rand(10,8)
x_erase = np.random.randint(0,params.reso_h, size = (10,))
y_erase = np.random.randint(0,params.reso_w, size = (10,))
            cropping_factor1 = np.random.uniform(0.6, 1, size = (10,)) # on average the cropping factor is ~0.8, i.e. it covers ~64% of the area
x0 = [np.random.randint(0, params.ori_reso_w - params.ori_reso_w*cropping_factor1[ii] + 1) for ii in range(10)]
y0 = [np.random.randint(0, params.ori_reso_h - params.ori_reso_h*cropping_factor1[ii] + 1) for ii in range(10)]
contrast_factor1 = np.random.uniform(0.75,1.25, size = (10,))
hue_factor1 = np.random.uniform(-0.1,0.1, size = (10,))
saturation_factor1 = np.random.uniform(0.75,1.25, size = (10,))
brightness_factor1 = np.random.uniform(0.75,1.25,size = (10,))
gamma1 = np.random.uniform(0.75,1.25, size = (10,))
erase_size1 = np.random.randint(int(self.erase_size/2),self.erase_size, size = (10,))
erase_size2 = np.random.randint(int(self.erase_size/2),self.erase_size, size = (10,))
random_color_dropped = np.random.randint(0,3,(10))
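            # augmentation parameters are drawn once per output stream (indices 0-9) and reused for
            # every frame of that stream, so each clip is augmented in a temporally consistent way:
            #   0/1: sparse_clip/a_sparse_clip, 2/3: dense_clip0/a_dense_clip0, 4/5: dense_clip1/a_dense_clip1,
            #   6/7: dense_clip2/a_dense_clip2, 8/9: dense_clip3/a_dense_clip3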
while(cap.isOpened()):
count += 1
ret, frame = cap.read()
if ((count not in frames_sparse) and (count not in frames_dense[0]) \
and (count not in frames_dense[1]) and (count not in frames_dense[2]) \
and (count not in frames_dense[3])) and (ret == True):
continue
if ret == True:
if (count in frames_sparse):
sparse_clip.append(self.augmentation(frame, random_array[0], x_erase[0], y_erase[0], cropping_factor1[0],\
x0[0], y0[0], contrast_factor1[0], hue_factor1[0], saturation_factor1[0], brightness_factor1[0],\
gamma1[0],erase_size1[0],erase_size2[0], random_color_dropped[0]))
a_sparse_clip.append(self.augmentation(frame, random_array[1], x_erase[1], y_erase[1], cropping_factor1[1],\
x0[1], y0[1], contrast_factor1[1], hue_factor1[1], saturation_factor1[1], brightness_factor1[1],\
gamma1[1],erase_size1[1],erase_size2[1], random_color_dropped[1]))
list_sparse.append(count)
if (count in frames_dense[0]):
dense_clip0.append(self.augmentation(frame, random_array[2], x_erase[2], y_erase[2], cropping_factor1[2],\
x0[2], y0[2], contrast_factor1[2], hue_factor1[2], saturation_factor1[2], brightness_factor1[2],\
gamma1[2],erase_size1[2],erase_size2[2], random_color_dropped[2]))
a_dense_clip0.append(self.augmentation(frame, random_array[3], x_erase[3], y_erase[3], cropping_factor1[3],\
x0[3], y0[3], contrast_factor1[3], hue_factor1[3], saturation_factor1[3], brightness_factor1[3],\
gamma1[3],erase_size1[3],erase_size2[3], random_color_dropped[3]))
list_dense[0].append(count)
if (count in frames_dense[1]):
dense_clip1.append(self.augmentation(frame, random_array[4], x_erase[4], y_erase[4], cropping_factor1[4],\
x0[4], y0[4], contrast_factor1[4], hue_factor1[4], saturation_factor1[4], brightness_factor1[4],\
gamma1[4],erase_size1[4],erase_size2[4], random_color_dropped[4]))
a_dense_clip1.append(self.augmentation(frame, random_array[5], x_erase[5], y_erase[5], cropping_factor1[5],\
x0[5], y0[5], contrast_factor1[5], hue_factor1[5], saturation_factor1[5], brightness_factor1[5],\
gamma1[5],erase_size1[5],erase_size2[5], random_color_dropped[5]))
list_dense[1].append(count)
if (count in frames_dense[2]):
dense_clip2.append(self.augmentation(frame, random_array[6], x_erase[6], y_erase[6], cropping_factor1[6],\
x0[6], y0[6], contrast_factor1[6], hue_factor1[6], saturation_factor1[6], brightness_factor1[6],\
gamma1[6],erase_size1[6],erase_size2[6], random_color_dropped[6]))
a_dense_clip2.append(self.augmentation(frame, random_array[7], x_erase[7], y_erase[7], cropping_factor1[7],\
x0[7], y0[7], contrast_factor1[7], hue_factor1[7], saturation_factor1[7], brightness_factor1[7],\
gamma1[7],erase_size1[7],erase_size2[7], random_color_dropped[7]))
list_dense[2].append(count)
if (count in frames_dense[3]):
dense_clip3.append(self.augmentation(frame, random_array[8], x_erase[8], y_erase[8], cropping_factor1[8],\
x0[8], y0[8], contrast_factor1[8], hue_factor1[8], saturation_factor1[8], brightness_factor1[8],\
gamma1[8],erase_size1[8],erase_size2[8], random_color_dropped[8]))
a_dense_clip3.append(self.augmentation(frame, random_array[9], x_erase[9], y_erase[9], cropping_factor1[9],\
x0[9], y0[9], contrast_factor1[9], hue_factor1[9], saturation_factor1[9], brightness_factor1[9],\
gamma1[9],erase_size1[9],erase_size2[9], random_color_dropped[9]))
list_dense[3].append(count)
else:
break
if len(sparse_clip) < params.num_frames and len(sparse_clip)>13:
# if params.num_frames - len(sparse_clip) >= 1:
# print(f'sparse_clip {vid_path} is missing {params.num_frames - len(sparse_clip)} frames')
remaining_num_frames = params.num_frames - len(sparse_clip)
sparse_clip = sparse_clip + sparse_clip[::-1][1:remaining_num_frames+1]
a_sparse_clip = a_sparse_clip + a_sparse_clip[::-1][1:remaining_num_frames+1]
if len(dense_clip3) < params.num_frames and len(dense_clip3)>7:
# if params.num_frames - len(dense_clip3) >= 1:
# print(f'dense_clip3 {vid_path} is missing {params.num_frames - len(dense_clip3)} frames')
remaining_num_frames = params.num_frames - len(dense_clip3)
dense_clip3 = dense_clip3 + dense_clip3[::-1][1:remaining_num_frames+1]
a_dense_clip3 = a_dense_clip3 + a_dense_clip3[::-1][1:remaining_num_frames+1]
try:
assert(len(sparse_clip)==params.num_frames)
assert(len(dense_clip0)==params.num_frames)
assert(len(dense_clip1)==params.num_frames)
assert(len(dense_clip2)==params.num_frames)
assert(len(dense_clip3)==params.num_frames)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense
except:
print(f'Clip {vid_path} has some frames reading issue, failed')
return None, None, None, None, None, None, None, None, None, None, None, None
except:
print(f'Clip {vid_path} has some unknown issue, failed')
return None, None, None, None, None, None, None, None, None, None, None, None
def augmentation(self, image, random_array, x_erase, y_erase, cropping_factor1,\
x0, y0, contrast_factor1, hue_factor1, saturation_factor1, brightness_factor1,\
gamma1,erase_size1,erase_size2, random_color_dropped):
image = self.PIL(image)
image = trans.functional.resized_crop(image,y0,x0,int(params.ori_reso_h*cropping_factor1),int(params.ori_reso_h*cropping_factor1),(params.reso_h,params.reso_w),interpolation=2)
if random_array[0] < 0.125:
image = trans.functional.adjust_contrast(image, contrast_factor = contrast_factor1) #0.75 to 1.25
if random_array[1] < 0.3 :
image = trans.functional.adjust_hue(image, hue_factor = hue_factor1) # hue factor will be between [-0.1, 0.1]
if random_array[2] < 0.3 :
image = trans.functional.adjust_saturation(image, saturation_factor = saturation_factor1) # brightness factor will be between [0.75, 1,25]
if random_array[3] < 0.3 :
image = trans.functional.adjust_brightness(image, brightness_factor = brightness_factor1) # brightness factor will be between [0.75, 1,25]
if random_array[0] > 0.125 and random_array[0] < 0.25:
image = trans.functional.adjust_contrast(image, contrast_factor = contrast_factor1) #0.75 to 1.25
if random_array[4] > 0.70:
if random_array[4] < 0.875:
image = trans.functional.to_grayscale(image, num_output_channels = 3)
if random_array[5] > 0.25:
image = trans.functional.adjust_gamma(image, gamma = gamma1, gain=1) #gamma range [0.8, 1.2]
else:
image = trans.functional.to_tensor(image)
image[random_color_dropped,:,:] = 0
image = self.PIL(image)
if random_array[6] > 0.5:
image = trans.functional.hflip(image)
image = trans.functional.to_tensor(image)
if random_array[7] < 0.5 :
image = trans.functional.erase(image, x_erase, y_erase, erase_size1, erase_size2, v=0)
return image
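# collate that drops samples whose clip building failed (any None field) and stacks the
# surviving clips of each stream into batch tensors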
def collate_fn2(batch):
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path = [], [], [], [], [], [], [], [], [], [], [], [], []
for item in batch:
if not (None in item):
sparse_clip.append(torch.stack(item[0],dim=0))
dense_clip0.append(torch.stack(item[1],dim=0))
dense_clip1.append(torch.stack(item[2],dim=0))
dense_clip2.append(torch.stack(item[3],dim=0))
dense_clip3.append(torch.stack(item[4],dim=0))
a_sparse_clip.append(torch.stack(item[5],dim=0))
a_dense_clip0.append(torch.stack(item[6],dim=0))
a_dense_clip1.append(torch.stack(item[7],dim=0))
a_dense_clip2.append(torch.stack(item[8],dim=0))
a_dense_clip3.append(torch.stack(item[9],dim=0))
list_sparse.append(np.asarray(item[10]))
list_dense.append(np.asarray(item[11]))
vid_path.append(item[12])
sparse_clip = torch.stack(sparse_clip, dim=0)
dense_clip0 = torch.stack(dense_clip0, dim=0)
dense_clip1 = torch.stack(dense_clip1, dim=0)
dense_clip2 = torch.stack(dense_clip2, dim=0)
dense_clip3 = torch.stack(dense_clip3, dim=0)
a_sparse_clip = torch.stack(a_sparse_clip, dim=0)
a_dense_clip0 = torch.stack(a_dense_clip0, dim=0)
a_dense_clip1 = torch.stack(a_dense_clip1, dim=0)
a_dense_clip2 = torch.stack(a_dense_clip2, dim=0)
a_dense_clip3 = torch.stack(a_dense_clip3, dim=0)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path
if __name__ == '__main__':
train_dataset = ss_dataset_gen1(shuffle = True, data_percentage = 1.0)
train_dataloader = DataLoader(train_dataset, batch_size=40, \
shuffle=False, num_workers=4, collate_fn=collate_fn2)
    print(f'Steps involved: {len(train_dataset)/40}')
t=time.time()
for i, (sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path) in enumerate(train_dataloader):
if (i+1)%25 == 0:
print(sparse_clip.shape)
print(dense_clip3.shape)
print()
print(f'Time taken to load data is {time.time()-t}')
|
[
"torchvision.transforms.functional.to_tensor",
"random.shuffle",
"torchvision.transforms.functional.adjust_saturation",
"numpy.random.randint",
"os.path.join",
"torch.utils.data.DataLoader",
"torchvision.transforms.functional.hflip",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.functional.erase",
"torchvision.transforms.functional.adjust_hue",
"torchvision.transforms.functional.adjust_contrast",
"numpy.asarray",
"torchvision.transforms.functional.to_grayscale",
"torchvision.transforms.functional.adjust_gamma",
"torchvision.transforms.functional.adjust_brightness",
"numpy.random.uniform",
"torch.stack",
"time.time",
"cv2.VideoCapture",
"numpy.random.rand",
"torchvision.transforms.ToTensor"
] |
[((16132, 16163), 'torch.stack', 'torch.stack', (['sparse_clip'], {'dim': '(0)'}), '(sparse_clip, dim=0)\n', (16143, 16163), False, 'import torch\n'), ((16182, 16213), 'torch.stack', 'torch.stack', (['dense_clip0'], {'dim': '(0)'}), '(dense_clip0, dim=0)\n', (16193, 16213), False, 'import torch\n'), ((16232, 16263), 'torch.stack', 'torch.stack', (['dense_clip1'], {'dim': '(0)'}), '(dense_clip1, dim=0)\n', (16243, 16263), False, 'import torch\n'), ((16282, 16313), 'torch.stack', 'torch.stack', (['dense_clip2'], {'dim': '(0)'}), '(dense_clip2, dim=0)\n', (16293, 16313), False, 'import torch\n'), ((16332, 16363), 'torch.stack', 'torch.stack', (['dense_clip3'], {'dim': '(0)'}), '(dense_clip3, dim=0)\n', (16343, 16363), False, 'import torch\n'), ((16385, 16418), 'torch.stack', 'torch.stack', (['a_sparse_clip'], {'dim': '(0)'}), '(a_sparse_clip, dim=0)\n', (16396, 16418), False, 'import torch\n'), ((16439, 16472), 'torch.stack', 'torch.stack', (['a_dense_clip0'], {'dim': '(0)'}), '(a_dense_clip0, dim=0)\n', (16450, 16472), False, 'import torch\n'), ((16493, 16526), 'torch.stack', 'torch.stack', (['a_dense_clip1'], {'dim': '(0)'}), '(a_dense_clip1, dim=0)\n', (16504, 16526), False, 'import torch\n'), ((16547, 16580), 'torch.stack', 'torch.stack', (['a_dense_clip2'], {'dim': '(0)'}), '(a_dense_clip2, dim=0)\n', (16558, 16580), False, 'import torch\n'), ((16601, 16634), 'torch.stack', 'torch.stack', (['a_dense_clip3'], {'dim': '(0)'}), '(a_dense_clip3, dim=0)\n', (16612, 16634), False, 'import torch\n'), ((16987, 17085), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': '(40)', 'shuffle': '(False)', 'num_workers': '(4)', 'collate_fn': 'collate_fn2'}), '(train_dataset, batch_size=40, shuffle=False, num_workers=4,\n collate_fn=collate_fn2)\n', (16997, 17085), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((17151, 17162), 'time.time', 'time.time', ([], {}), '()\n', (17160, 17162), False, 'import time\n'), ((1717, 1735), 'torchvision.transforms.ToPILImage', 'trans.ToPILImage', ([], {}), '()\n', (1733, 1735), True, 'import torchvision.transforms as trans\n'), ((1758, 1774), 'torchvision.transforms.ToTensor', 'trans.ToTensor', ([], {}), '()\n', (1772, 1774), True, 'import torchvision.transforms as trans\n'), ((14838, 14871), 'torchvision.transforms.functional.to_tensor', 'trans.functional.to_tensor', (['image'], {}), '(image)\n', (14864, 14871), True, 'import torchvision.transforms as trans\n'), ((1484, 1514), 'random.shuffle', 'random.shuffle', (['self.all_paths'], {}), '(self.all_paths)\n', (1498, 1514), False, 'import random\n'), ((2954, 2980), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vid_path'], {}), '(vid_path)\n', (2970, 2980), False, 'import cv2\n'), ((5208, 5229), 'numpy.random.rand', 'np.random.rand', (['(10)', '(8)'], {}), '(10, 8)\n', (5222, 5229), True, 'import numpy as np\n'), ((5251, 5298), 'numpy.random.randint', 'np.random.randint', (['(0)', 'params.reso_h'], {'size': '(10,)'}), '(0, params.reso_h, size=(10,))\n', (5268, 5298), True, 'import numpy as np\n'), ((5322, 5369), 'numpy.random.randint', 'np.random.randint', (['(0)', 'params.reso_w'], {'size': '(10,)'}), '(0, params.reso_w, size=(10,))\n', (5339, 5369), True, 'import numpy as np\n'), ((5404, 5441), 'numpy.random.uniform', 'np.random.uniform', (['(0.6)', '(1)'], {'size': '(10,)'}), '(0.6, 1, size=(10,))\n', (5421, 5441), True, 'import numpy as np\n'), ((5794, 5835), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', 
(5811, 5835), True, 'import numpy as np\n'), ((5863, 5903), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': '(10,)'}), '(-0.1, 0.1, size=(10,))\n', (5880, 5903), True, 'import numpy as np\n'), ((5938, 5979), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', (5955, 5979), True, 'import numpy as np\n'), ((6014, 6055), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', (6031, 6055), True, 'import numpy as np\n'), ((6077, 6118), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', (6094, 6118), True, 'import numpy as np\n'), ((6353, 6380), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (6370, 6380), True, 'import numpy as np\n'), ((13457, 13530), 'torchvision.transforms.functional.adjust_contrast', 'trans.functional.adjust_contrast', (['image'], {'contrast_factor': 'contrast_factor1'}), '(image, contrast_factor=contrast_factor1)\n', (13489, 13530), True, 'import torchvision.transforms as trans\n'), ((13602, 13660), 'torchvision.transforms.functional.adjust_hue', 'trans.functional.adjust_hue', (['image'], {'hue_factor': 'hue_factor1'}), '(image, hue_factor=hue_factor1)\n', (13629, 13660), True, 'import torchvision.transforms as trans\n'), ((13759, 13838), 'torchvision.transforms.functional.adjust_saturation', 'trans.functional.adjust_saturation', (['image'], {'saturation_factor': 'saturation_factor1'}), '(image, saturation_factor=saturation_factor1)\n', (13793, 13838), True, 'import torchvision.transforms as trans\n'), ((13945, 14024), 'torchvision.transforms.functional.adjust_brightness', 'trans.functional.adjust_brightness', (['image'], {'brightness_factor': 'brightness_factor1'}), '(image, brightness_factor=brightness_factor1)\n', (13979, 14024), True, 'import torchvision.transforms as trans\n'), ((14159, 14232), 'torchvision.transforms.functional.adjust_contrast', 'trans.functional.adjust_contrast', (['image'], {'contrast_factor': 'contrast_factor1'}), '(image, contrast_factor=contrast_factor1)\n', (14191, 14232), True, 'import torchvision.transforms as trans\n'), ((14789, 14818), 'torchvision.transforms.functional.hflip', 'trans.functional.hflip', (['image'], {}), '(image)\n', (14811, 14818), True, 'import torchvision.transforms as trans\n'), ((14928, 15006), 'torchvision.transforms.functional.erase', 'trans.functional.erase', (['image', 'x_erase', 'y_erase', 'erase_size1', 'erase_size2'], {'v': '(0)'}), '(image, x_erase, y_erase, erase_size1, erase_size2, v=0)\n', (14950, 15006), True, 'import torchvision.transforms as trans\n'), ((3502, 3562), 'numpy.random.randint', 'np.random.randint', (['(0)', '(frame_count - min_temporal_span_sparse)'], {}), '(0, frame_count - min_temporal_span_sparse)\n', (3519, 3562), True, 'import numpy as np\n'), ((5522, 5612), 'numpy.random.randint', 'np.random.randint', (['(0)', '(params.ori_reso_w - params.ori_reso_w * cropping_factor1[ii] + 1)'], {}), '(0, params.ori_reso_w - params.ori_reso_w *\n cropping_factor1[ii] + 1)\n', (5539, 5612), True, 'import numpy as np\n'), ((5656, 5746), 'numpy.random.randint', 'np.random.randint', (['(0)', '(params.ori_reso_h - params.ori_reso_h * cropping_factor1[ii] + 1)'], {}), '(0, params.ori_reso_h - params.ori_reso_h *\n cropping_factor1[ii] + 1)\n', (5673, 5746), True, 'import numpy as np\n'), ((14348, 14407), 
'torchvision.transforms.functional.to_grayscale', 'trans.functional.to_grayscale', (['image'], {'num_output_channels': '(3)'}), '(image, num_output_channels=3)\n', (14377, 14407), True, 'import torchvision.transforms as trans\n'), ((14608, 14641), 'torchvision.transforms.functional.to_tensor', 'trans.functional.to_tensor', (['image'], {}), '(image)\n', (14634, 14641), True, 'import torchvision.transforms as trans\n'), ((15384, 15411), 'torch.stack', 'torch.stack', (['item[0]'], {'dim': '(0)'}), '(item[0], dim=0)\n', (15395, 15411), False, 'import torch\n'), ((15444, 15471), 'torch.stack', 'torch.stack', (['item[1]'], {'dim': '(0)'}), '(item[1], dim=0)\n', (15455, 15471), False, 'import torch\n'), ((15503, 15530), 'torch.stack', 'torch.stack', (['item[2]'], {'dim': '(0)'}), '(item[2], dim=0)\n', (15514, 15530), False, 'import torch\n'), ((15562, 15589), 'torch.stack', 'torch.stack', (['item[3]'], {'dim': '(0)'}), '(item[3], dim=0)\n', (15573, 15589), False, 'import torch\n'), ((15621, 15648), 'torch.stack', 'torch.stack', (['item[4]'], {'dim': '(0)'}), '(item[4], dim=0)\n', (15632, 15648), False, 'import torch\n'), ((15683, 15710), 'torch.stack', 'torch.stack', (['item[5]'], {'dim': '(0)'}), '(item[5], dim=0)\n', (15694, 15710), False, 'import torch\n'), ((15744, 15771), 'torch.stack', 'torch.stack', (['item[6]'], {'dim': '(0)'}), '(item[6], dim=0)\n', (15755, 15771), False, 'import torch\n'), ((15805, 15832), 'torch.stack', 'torch.stack', (['item[7]'], {'dim': '(0)'}), '(item[7], dim=0)\n', (15816, 15832), False, 'import torch\n'), ((15866, 15893), 'torch.stack', 'torch.stack', (['item[8]'], {'dim': '(0)'}), '(item[8], dim=0)\n', (15877, 15893), False, 'import torch\n'), ((15927, 15954), 'torch.stack', 'torch.stack', (['item[9]'], {'dim': '(0)'}), '(item[9], dim=0)\n', (15938, 15954), False, 'import torch\n'), ((15988, 16008), 'numpy.asarray', 'np.asarray', (['item[10]'], {}), '(item[10])\n', (15998, 16008), True, 'import numpy as np\n'), ((16040, 16060), 'numpy.asarray', 'np.asarray', (['item[11]'], {}), '(item[11])\n', (16050, 16060), True, 'import numpy as np\n'), ((14481, 14539), 'torchvision.transforms.functional.adjust_gamma', 'trans.functional.adjust_gamma', (['image'], {'gamma': 'gamma1', 'gain': '(1)'}), '(image, gamma=gamma1, gain=1)\n', (14510, 14539), True, 'import torchvision.transforms as trans\n'), ((17572, 17583), 'time.time', 'time.time', ([], {}), '()\n', (17581, 17583), False, 'import time\n'), ((920, 985), 'os.path.join', 'os.path.join', (['cfg.path_folder', '"""ucfTrainTestlist/trainlist01.txt"""'], {}), "(cfg.path_folder, 'ucfTrainTestlist/trainlist01.txt')\n", (932, 985), False, 'import os\n'), ((1070, 1135), 'os.path.join', 'os.path.join', (['cfg.path_folder', '"""ucfTrainTestlist/trainlist02.txt"""'], {}), "(cfg.path_folder, 'ucfTrainTestlist/trainlist02.txt')\n", (1082, 1135), False, 'import os\n'), ((1220, 1285), 'os.path.join', 'os.path.join', (['cfg.path_folder', '"""ucfTrainTestlist/trainlist03.txt"""'], {}), "(cfg.path_folder, 'ucfTrainTestlist/trainlist03.txt')\n", (1232, 1285), False, 'import os\n')]
|
from location import Location
import random
class Party:
def __init__(self, simulation, name, colour, strategy=""):
self.location = Location()
self.simulation = simulation
if strategy == "":
self.random_strategy()
else:
self.strategy = strategy
self.name = name
self.colour = colour # random_colour() ???
self.voters = []
self.previous_count = -1
# def random_strategy(self):
    #     self.strategy = random.choice(self.simulation.get_allowed_strategies())
def random_strategy(self):
a = random.randint(1,5)
if a == 1:
self.strategy = "sticker"
elif a == 2:
self.strategy = "predator"
elif a == 3:
self.strategy = "hunter"
elif a == 4:
self.strategy = "aggregator"
elif a == 5:
self.strategy = "random"
def add_voter(self, voter):
return self.voters.append(voter)
def reset_voters(self):
self.voters = []
def count_voters(self):
return len(self.voters)
def update_location(self):
if self.strategy == "sticker":
self.update_location_sticker()
elif self.strategy == "predator":
self.update_location_predator()
elif self.strategy == "hunter":
self.update_location_hunter()
elif self.strategy == "aggregator":
self.update_location_aggregator()
elif self.strategy == "random":
self.update_location_random()
else:
print("Strategy " + self.strategy + " does not exist!")
def update_location_predator(self):
parties = self.simulation.get_parties()
biggest_party = self
for p in parties:
if biggest_party.count_voters() < p.count_voters():
biggest_party = p
self.location.move_towards(biggest_party.location)
def update_location_aggregator(self):
if len(self.voters) > 0:
sum_x = 0
sum_y = 0
for voter in self.voters:
sum_x += voter.location.x
sum_y += voter.location.y
target_location = Location()
target_location.set_x(sum_x / len(self.voters))
target_location.set_y(sum_y / len(self.voters))
self.location.move_towards(target_location)
def update_location_hunter(self):
#get previous move
#if voters before prev move >= voters after prev move
# then turn 180 degrees and move again anywhere 90 degrees either side
#if voters before prev move < voters after prev move
# move same way as previous move again
if self.previous_count == -1:
direction = random.random() * 360.0
elif self.previous_count <= self.count_voters():
direction = self.previous_direction
else:
lower_limit = self.previous_direction + 90
direction = (random.random() * 180.0 + lower_limit) % 360
self.location.move_angle(direction)
self.previous_direction = direction
self.previous_count = self.count_voters()
# def save_state(self):
# self.previous_count = self.count_voters()
def update_location_random(self):
self.location.random_move()
def update_location_sticker(self):
pass
def get_location(self):
return self.location
def get_strategy(self):
return self.strategy
def get_name(self):
return self.name
def get_colour(self):
return self.colour
def get_voters(self):
return self.voters
|
[
"random.random",
"random.randint",
"location.Location"
] |
[((140, 150), 'location.Location', 'Location', ([], {}), '()\n', (148, 150), False, 'from location import Location\n'), ((547, 567), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (561, 567), False, 'import random\n'), ((1963, 1973), 'location.Location', 'Location', ([], {}), '()\n', (1971, 1973), False, 'from location import Location\n'), ((2486, 2501), 'random.random', 'random.random', ([], {}), '()\n', (2499, 2501), False, 'import random\n'), ((2689, 2704), 'random.random', 'random.random', ([], {}), '()\n', (2702, 2704), False, 'import random\n')]
|
from snmachine import sndata,snfeatures
import numpy as np
import pandas
from astropy.table import Table
import pickle
import os,sys
'''
print('starting readin of monster files')
#raw_data=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/test_set.csv')
raw_data=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/training_set.csv')
print('read in data set')
#raw_metadata=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/test_set_metadata.csv')
raw_metadata=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/training_set_metadata.csv')
print('read in metadata')
sys.stdout.flush()
#objects=np.unique(raw_data['object_id'])
#filters=np.unique(raw_data['passband']).astype('str')
'''
index=int(sys.argv[1])
print('Performing feature extraction on batch %d'%index)
out_folder='/share/hypatia/snmachine_resources/data/plasticc/data_products/plasticc_test/with_nondetection_cutting/fullset/data/'
print('loading data')
sys.stdout.flush()
with open(os.path.join(out_folder,'dataset_%d.pickle'%index),'rb') as f:
d=pickle.load(f)
int_folder=os.path.join(out_folder,'int')
feats_folder=os.path.join(out_folder,'features')
print('data loaded')
sys.stdout.flush()
#d=sndata.EmptyDataset(filter_set=filters,survey_name='plasticc',folder=out_folder)
#d.object_names=d.object_names[:10]
print('nobj: '+str(len(d.object_names)))
print('extracting features')
sys.stdout.flush()
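# sym2 wavelets on an 1100-point grid; reuse the PCA basis stored in pca_folder instead of recomputing it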
wf=snfeatures.WaveletFeatures(wavelet='sym2',ngp=1100)
pca_folder='/share/hypatia/snmachine_resources/data/plasticc/dummy_pca/'
feats=wf.extract_features(d,nprocesses=1,save_output='all',output_root=int_folder, recompute_pca=False, pca_path=pca_folder,xmax=1100)
feats.write(os.path.join(feats_folder, 'wavelet_features_%d.fits'%index),overwrite=True)
'''
with open(os.path.join(feats_folder,'PCA_mean.pickle'),'wb') as f1:
pickle.dump(wf.PCA_mean,f1)
with open(os.path.join(feats_folder,'PCA_eigenvals.pickle'),'wb') as f2:
pickle.dump(wf.PCA_eigenvals,f2)
with open(os.path.join(feats_folder,'PCA_eigenvectors.pickle'),'wb') as f3:
pickle.dump(wf.PCA_eigenvectors,f3)
np.savetxt(os.path.join(feats_folder,'PCA_mean.txt'),wf.PCA_mean)
np.savetxt(os.path.join(feats_folder,'PCA_eigenvals.txt'),wf.PCA_eigenvals)
np.savetxt(os.path.join(feats_folder,'PCA_eigenvectors.txt'),wf.PCA_eigenvectors)
'''
|
[
"snmachine.snfeatures.WaveletFeatures",
"pickle.load",
"sys.stdout.flush",
"os.path.join"
] |
[((984, 1002), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1000, 1002), False, 'import os, sys\n'), ((1105, 1136), 'os.path.join', 'os.path.join', (['out_folder', '"""int"""'], {}), "(out_folder, 'int')\n", (1117, 1136), False, 'import os, sys\n'), ((1149, 1185), 'os.path.join', 'os.path.join', (['out_folder', '"""features"""'], {}), "(out_folder, 'features')\n", (1161, 1185), False, 'import os, sys\n'), ((1207, 1225), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1223, 1225), False, 'import os, sys\n'), ((1418, 1436), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1434, 1436), False, 'import os, sys\n'), ((1440, 1492), 'snmachine.snfeatures.WaveletFeatures', 'snfeatures.WaveletFeatures', ([], {'wavelet': '"""sym2"""', 'ngp': '(1100)'}), "(wavelet='sym2', ngp=1100)\n", (1466, 1492), False, 'from snmachine import sndata, snfeatures\n'), ((1079, 1093), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1090, 1093), False, 'import pickle\n'), ((1714, 1776), 'os.path.join', 'os.path.join', (['feats_folder', "('wavelet_features_%d.fits' % index)"], {}), "(feats_folder, 'wavelet_features_%d.fits' % index)\n", (1726, 1776), False, 'import os, sys\n'), ((1013, 1066), 'os.path.join', 'os.path.join', (['out_folder', "('dataset_%d.pickle' % index)"], {}), "(out_folder, 'dataset_%d.pickle' % index)\n", (1025, 1066), False, 'import os, sys\n')]
|
import os
import json
import errno
import sys
import argparse
from dockstream.utils.execute_external.execute import Executor
from dockstream.utils import files_paths
from dockstream.utils.enums.docking_enum import DockingConfigurationEnum
_DC = DockingConfigurationEnum()
def run_script(input_path: str) -> dict:
"""this method takes an input path to either a folder containing DockStream json files or a single json file and
returns a dictionary whose keys are the json names and the corresponding values are the paths to the json
file. The dictionary will be looped later to run DockStream
:param input_path: path to either a folder of json files or a single json file
:raises FileNotFoundError: this error is raised if input_path is neither a folder nor a file
:return: dictionary, keys are the DockStream json names and values are the paths to them
"""
# first check if input_path is valid (either a folder containing json files or a single json file)
if not os.path.isdir(input_path) and not os.path.isfile(input_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), input_path)
# if input_path is a folder, ensure it is not empty and that it contains at least 1 json file
if os.path.isdir(input_path):
if not os.listdir(input_path):
sys.exit(input_path + ' folder is empty. Please ensure your DockStream json files are added to the folder.')
elif not any(file.endswith('.json') for file in os.listdir(input_path)):
sys.exit(input_path + ' contains no json files. Please ensure your DockStream json files are added to the folder.')
    # if input_path is a single file, check that it is in json format
if os.path.isfile(input_path):
if not input_path.endswith('.json'):
sys.exit(input_path + ' is not a json file. Please ensure it is in json format.')
# initialize a dictionary to hold all DockStream runs
batch_runs = {}
    # loop through all json files and update the paths if input_path is a directory
if os.path.isdir(input_path):
all_runs = [file for file in os.listdir(input_path) if file.endswith('.json')]
for json in all_runs:
batch_runs[json.replace('.json', '')] = os.path.join(input_path, json)
# at this point, input path must be a single json file
else:
json_name = os.path.basename(os.path.normpath(input_path)).replace('.json', '')
batch_runs[json_name] = input_path
return batch_runs
if __name__ == '__main__':
# take user specified input parameters to run the benchmarking script
parser = argparse.ArgumentParser(description='Facilitates batch DockStream execution.')
parser.add_argument('-input_path', type=str, required=True, help='The path to either a folder of DockStream json files or a single json file.')
args = parser.parse_args()
batch_runs = run_script(args.input_path)
executor = Executor()
# initialize a dictionary to store the names of all runs that did not enforce "best_per_ligand"
non_bpl_runs = {}
# loop through all user json files and run DockStream
for trial_name, json_path in batch_runs.items():
# check if the current DockStream run has "best_per_ligand" enforced
with open(json_path, "r") as f:
parameters = json.load(f)
# in case output mode was not specified in the configuration json
try:
for docking_run in parameters[_DC.DOCKING][_DC.DOCKING_RUNS]:
output_mode = docking_run[_DC.OUTPUT][_DC.OUTPUT_SCORES][_DC.OUTPUT_MODE]
if output_mode != _DC.OUTPUT_MODE_BESTPERLIGAND:
non_bpl_runs[trial_name] = output_mode
break
except:
pass
print(f'Running {trial_name}')
result = executor.execute(command=sys.executable, arguments=[files_paths.attach_root_path('docker.py'),
'-conf', json_path, '-debug'], check=False)
print(result)
# print out error messages (if applicable) for the current DockStream run
if result.returncode != 0:
print(f'There was an error with {trial_name} DockStream run.')
print(result.stdout)
print(result.stderr)
if bool(non_bpl_runs):
# print the names of the runs which did not enforce "best_per_ligand"
print(f"List of runs which did not have 'best_per_ligand' specified. These runs cannot be "
f"passed into the analysis script. {non_bpl_runs}")
|
[
"dockstream.utils.files_paths.attach_root_path",
"json.load",
"argparse.ArgumentParser",
"dockstream.utils.enums.docking_enum.DockingConfigurationEnum",
"os.path.isdir",
"dockstream.utils.execute_external.execute.Executor",
"json.replace",
"os.path.isfile",
"os.strerror",
"os.path.normpath",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((247, 273), 'dockstream.utils.enums.docking_enum.DockingConfigurationEnum', 'DockingConfigurationEnum', ([], {}), '()\n', (271, 273), False, 'from dockstream.utils.enums.docking_enum import DockingConfigurationEnum\n'), ((1263, 1288), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (1276, 1288), False, 'import os\n'), ((1744, 1770), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (1758, 1770), False, 'import os\n'), ((2081, 2106), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (2094, 2106), False, 'import os\n'), ((2648, 2726), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Facilitates batch DockStream execution."""'}), "(description='Facilitates batch DockStream execution.')\n", (2671, 2726), False, 'import argparse\n'), ((2968, 2978), 'dockstream.utils.execute_external.execute.Executor', 'Executor', ([], {}), '()\n', (2976, 2978), False, 'from dockstream.utils.execute_external.execute import Executor\n'), ((1011, 1036), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (1024, 1036), False, 'import os\n'), ((1045, 1071), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (1059, 1071), False, 'import os\n'), ((1119, 1144), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (1130, 1144), False, 'import os\n'), ((1305, 1327), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (1315, 1327), False, 'import os\n'), ((1341, 1458), 'sys.exit', 'sys.exit', (["(input_path +\n ' folder is empty. Please ensure your DockStream json files are added to the folder.'\n )"], {}), "(input_path +\n ' folder is empty. Please ensure your DockStream json files are added to the folder.'\n )\n", (1349, 1458), False, 'import sys\n'), ((1829, 1914), 'sys.exit', 'sys.exit', (["(input_path + ' is not a json file. Please ensure it is in json format.')"], {}), "(input_path +\n ' is not a json file. Please ensure it is in json format.')\n", (1837, 1914), False, 'import sys\n'), ((2277, 2307), 'os.path.join', 'os.path.join', (['input_path', 'json'], {}), '(input_path, json)\n', (2289, 2307), False, 'import os\n'), ((3354, 3366), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3363, 3366), False, 'import json\n'), ((1543, 1667), 'sys.exit', 'sys.exit', (["(input_path +\n ' contains no json files. Please ensure your DockStream json files are added to the folder.'\n )"], {}), "(input_path +\n ' contains no json files. Please ensure your DockStream json files are added to the folder.'\n )\n", (1551, 1667), False, 'import sys\n'), ((2145, 2167), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (2155, 2167), False, 'import os\n'), ((2248, 2273), 'json.replace', 'json.replace', (['""".json"""', '""""""'], {}), "('.json', '')\n", (2260, 2273), False, 'import json\n'), ((2415, 2443), 'os.path.normpath', 'os.path.normpath', (['input_path'], {}), '(input_path)\n', (2431, 2443), False, 'import os\n'), ((3947, 3988), 'dockstream.utils.files_paths.attach_root_path', 'files_paths.attach_root_path', (['"""docker.py"""'], {}), "('docker.py')\n", (3975, 3988), False, 'from dockstream.utils import files_paths\n'), ((1506, 1528), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (1516, 1528), False, 'import os\n')]
|
#---------------------------------------------------
# Perform a bunch of experiments using CompareIntegerMaps.exe, and writes the results to results.txt.
# You can filter the experiments by name by passing a regular expression as a script argument.
# For example: run_tests.py LOOKUP_0_.*
# Results are also cached in an intermediate directory, temp, so you can add new results to results.txt
# without redoing previous experiments.
#---------------------------------------------------
import cmake_launcher
import math
import os
import re
import sys
from collections import defaultdict
from pprint import pprint
#GENERATOR = 'Visual Studio 10'
IGNORE_CACHE = False
#---------------------------------------------------
# TestLauncher
#---------------------------------------------------
class TestLauncher:
""" Configures, builds & runs CompareIntegerMaps using the specified options. """
DEFAULT_DEFS = {
'CACHE_STOMPER_ENABLED': 0,
'EXPERIMENT': 'INSERT',
'CONTAINER': 'TABLE',
}
def __init__(self):
cmakeBuilder = cmake_launcher.CMakeBuilder('..', generator=globals().get('GENERATOR'))
# It would be cool to get CMake to tell us the path to the executable instead.
self.launcher = cmake_launcher.CMakeLauncher(cmakeBuilder, 'CompareIntegerMaps.exe')
def run(self, seed, operationsPerGroup, keyCount, granularity, stompBytes, **defs):
args = [seed, operationsPerGroup, keyCount, granularity, stompBytes]
mergedDefs = dict(self.DEFAULT_DEFS)
mergedDefs.update(defs)
        fullDefs = dict([('INTEGER_MAP_' + k, v) for k, v in mergedDefs.items()])
self.launcher.ignoreCache = IGNORE_CACHE
output = self.launcher.run(*args, **fullDefs)
return eval(output)
#---------------------------------------------------
# Experiment
#---------------------------------------------------
class Experiment:
""" A group of CompareIntegerMaps runs using similar options but different seeds. """
def __init__(self, testLauncher, name, seeds, *args, **kwargs):
self.testLauncher = testLauncher
self.name = name
self.seeds = seeds
self.args = args
self.kwargs = kwargs
def run(self, results):
allGroups = defaultdict(list)
        for seed in range(self.seeds):
print('Running %s #%d/%d...' % (self.name, seed + 1, self.seeds))
r = self.testLauncher.run(seed, *self.args, **self.kwargs)
for marker, units in r['results']:
allGroups[marker].append(units)
def medianAverage(values):
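            # drop the single highest and lowest values before averaging, once there are enough samples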
if len(values) >= 4:
values = sorted(values)[1:-1]
return sum(values) / len(values)
results[self.name] = [(marker, medianAverage(units)) for marker, units in sorted(allGroups.items())]
#---------------------------------------------------
# main
#---------------------------------------------------
if __name__ == '__main__':
from datetime import datetime
start = datetime.now()
os.chdir(os.path.split(sys.argv[0])[0])
filter = re.compile((sys.argv + ['.*'])[1])
if '--nocache' in sys.argv[1:]:
IGNORE_CACHE = True
results = {}
testLauncher = TestLauncher()
maxKeys = 18000000
granularity = 200
for container in ['TABLE', 'JUDY']:
experiment = Experiment(testLauncher,
'MEMORY_%s' % container,
8 if container == 'JUDY' else 1, 0, maxKeys, granularity, 0,
CONTAINER=container,
EXPERIMENT='MEMORY')
if filter.match(experiment.name):
experiment.run(results)
for stomp in [0, 1000, 10000]:
experiment = Experiment(testLauncher,
'INSERT_%d_%s' % (stomp, container),
8, 8000, maxKeys, granularity, stomp,
CONTAINER=container,
EXPERIMENT='INSERT',
CACHE_STOMPER_ENABLED=1 if stomp > 0 else 0)
if filter.match(experiment.name):
experiment.run(results)
experiment = Experiment(testLauncher,
'LOOKUP_%d_%s' % (stomp, container),
8, 8000, maxKeys, granularity, stomp,
CONTAINER=container,
EXPERIMENT='LOOKUP',
CACHE_STOMPER_ENABLED=1 if stomp > 0 else 0)
if filter.match(experiment.name):
experiment.run(results)
pprint(results, open('results.txt', 'w'))
print('Elapsed time: %s' % (datetime.now() - start))
|
[
"collections.defaultdict",
"cmake_launcher.CMakeLauncher",
"os.path.split",
"datetime.datetime.now",
"re.compile"
] |
[((3048, 3062), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3060, 3062), False, 'from datetime import datetime\n'), ((3121, 3155), 're.compile', 're.compile', (["(sys.argv + ['.*'])[1]"], {}), "((sys.argv + ['.*'])[1])\n", (3131, 3155), False, 'import re\n'), ((1261, 1329), 'cmake_launcher.CMakeLauncher', 'cmake_launcher.CMakeLauncher', (['cmakeBuilder', '"""CompareIntegerMaps.exe"""'], {}), "(cmakeBuilder, 'CompareIntegerMaps.exe')\n", (1289, 1329), False, 'import cmake_launcher\n'), ((2289, 2306), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2300, 2306), False, 'from collections import defaultdict\n'), ((3077, 3103), 'os.path.split', 'os.path.split', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (3090, 3103), False, 'import os\n'), ((4578, 4592), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4590, 4592), False, 'from datetime import datetime\n')]
|
"""
functionality for solving body-intersection problems. used by
`lhorizon.targeter`. currently contains only ray-sphere intersection solutions
but could also sensibly contain expressions for bodies of different shapes.
"""
from collections.abc import Callable, Sequence
import sympy as sp
# sympy symbols for ray-sphere equations
x, y, z, x0, y0, z0, mx, my, mz, d = sp.symbols(
"x,y,z,x0,y0,z0,m_x,m_y,m_z,d", real=True
)
def ray_sphere_equations(radius: float) -> list[sp.Eq]:
"""
generate a simple system of equations for intersections between
a ray with origin at (0, 0, 0) and direction vector [x, y, z]
and a sphere with radius == 'radius' and center (mx, my, mz).
"""
x_constraint = sp.Eq(x, x0 * d)
y_constraint = sp.Eq(y, y0 * d)
z_constraint = sp.Eq(z, z0 * d)
sphere_bound_constraint = sp.Eq(
((x - mx) ** 2 + (y - my) ** 2 + (z - mz) ** 2) ** (1 / 2), radius
)
return [x_constraint, y_constraint, z_constraint, sphere_bound_constraint]
def get_ray_sphere_solution(
radius: float, farside: bool = False
) -> tuple[sp.Expr]:
"""
produce a solution to the generalized ray-sphere equation for a body of
radius `radius`. by default, take the nearside solution. this produces a
tuple of sympy expressions objects, which are fairly slow to evaluate;
unless you are planning to further manipulate them, you would probably
rather call make_ray_sphere_lambdas().
"""
# sp.solve() returns the nearside solution first
selected_solution = 0
if farside:
selected_solution = 1
general_solution = sp.solve(ray_sphere_equations(radius), [x, y, z, d])[
selected_solution
]
return general_solution
def lambdify_system(
expressions: Sequence[sp.Expr],
expression_names: Sequence[str],
variables: Sequence[sp.Symbol],
) -> dict[str, Callable]:
"""
returns a dict of functions that substitute the symbols in 'variables'
into the expressions in 'expressions'. 'expression_names' serve as the
keys of the dict.
"""
return {
expression_name: sp.lambdify(variables, expression, "numpy")
for expression, expression_name in zip(expressions, expression_names)
}
def make_ray_sphere_lambdas(
radius: float, farside=False
) -> dict[str, Callable]:
"""
produce a dict of functions that return solutions for the ray-sphere
equation for a sphere of radius `radius`.
"""
return lambdify_system(
get_ray_sphere_solution(radius, farside),
["x", "y", "z", "d"],
[x0, y0, z0, mx, my, mz],
)
|
[
"sympy.symbols",
"sympy.Eq",
"sympy.lambdify"
] |
[((371, 424), 'sympy.symbols', 'sp.symbols', (['"""x,y,z,x0,y0,z0,m_x,m_y,m_z,d"""'], {'real': '(True)'}), "('x,y,z,x0,y0,z0,m_x,m_y,m_z,d', real=True)\n", (381, 424), True, 'import sympy as sp\n'), ((724, 740), 'sympy.Eq', 'sp.Eq', (['x', '(x0 * d)'], {}), '(x, x0 * d)\n', (729, 740), True, 'import sympy as sp\n'), ((760, 776), 'sympy.Eq', 'sp.Eq', (['y', '(y0 * d)'], {}), '(y, y0 * d)\n', (765, 776), True, 'import sympy as sp\n'), ((796, 812), 'sympy.Eq', 'sp.Eq', (['z', '(z0 * d)'], {}), '(z, z0 * d)\n', (801, 812), True, 'import sympy as sp\n'), ((843, 916), 'sympy.Eq', 'sp.Eq', (['(((x - mx) ** 2 + (y - my) ** 2 + (z - mz) ** 2) ** (1 / 2))', 'radius'], {}), '(((x - mx) ** 2 + (y - my) ** 2 + (z - mz) ** 2) ** (1 / 2), radius)\n', (848, 916), True, 'import sympy as sp\n'), ((2111, 2154), 'sympy.lambdify', 'sp.lambdify', (['variables', 'expression', '"""numpy"""'], {}), "(variables, expression, 'numpy')\n", (2122, 2154), True, 'import sympy as sp\n')]
|
import gym
import time
from connect_four.agents import DFPN
from connect_four.agents import difficult_connect_four_positions
from connect_four.evaluation.victor.victor_evaluator import Victor
from connect_four.hashing import ConnectFourHasher
from connect_four.transposition.sqlite_transposition_table import SQLiteTranspositionTable
env = gym.make('connect_four-v0')
env.reset()
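# the Victor evaluator, position hasher and SQLite transposition table back the depth-first proof-number search agent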
evaluator = Victor(model=env)
hasher = ConnectFourHasher(env=env)
tt = SQLiteTranspositionTable(database_file="connect_four.db")
agent = DFPN(evaluator, hasher, tt)
start = time.time()
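# run depth-first proof-number search from the current position and time it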
evaluation = agent.depth_first_proof_number_search(env=env)
end = time.time()
print(evaluation)
print("time to run = ", end - start)
tt.close()
|
[
"connect_four.evaluation.victor.victor_evaluator.Victor",
"gym.make",
"connect_four.agents.DFPN",
"time.time",
"connect_four.transposition.sqlite_transposition_table.SQLiteTranspositionTable",
"connect_four.hashing.ConnectFourHasher"
] |
[((343, 370), 'gym.make', 'gym.make', (['"""connect_four-v0"""'], {}), "('connect_four-v0')\n", (351, 370), False, 'import gym\n'), ((397, 414), 'connect_four.evaluation.victor.victor_evaluator.Victor', 'Victor', ([], {'model': 'env'}), '(model=env)\n', (403, 414), False, 'from connect_four.evaluation.victor.victor_evaluator import Victor\n'), ((424, 450), 'connect_four.hashing.ConnectFourHasher', 'ConnectFourHasher', ([], {'env': 'env'}), '(env=env)\n', (441, 450), False, 'from connect_four.hashing import ConnectFourHasher\n'), ((456, 513), 'connect_four.transposition.sqlite_transposition_table.SQLiteTranspositionTable', 'SQLiteTranspositionTable', ([], {'database_file': '"""connect_four.db"""'}), "(database_file='connect_four.db')\n", (480, 513), False, 'from connect_four.transposition.sqlite_transposition_table import SQLiteTranspositionTable\n'), ((522, 549), 'connect_four.agents.DFPN', 'DFPN', (['evaluator', 'hasher', 'tt'], {}), '(evaluator, hasher, tt)\n', (526, 549), False, 'from connect_four.agents import DFPN\n'), ((559, 570), 'time.time', 'time.time', ([], {}), '()\n', (568, 570), False, 'import time\n'), ((637, 648), 'time.time', 'time.time', ([], {}), '()\n', (646, 648), False, 'import time\n')]
|
#!/usr/bin/env python
import datetime
from lib.geo import interpolate_lat_lon, compute_bearing, offset_bearing
from lib.sequence import Sequence
import lib.io
import os
import sys
from lib.exifedit import ExifEdit
def interpolate_with_anchors(anchors, angle_offset):
'''
Interpolate gps position and compass angle given a list of anchors
anchor:
lat: latitude
lon: longitude
alt: altitude
datetime: date time of the anchor (datetime object)
num_image: number of images in between two anchors
'''
points = [ (a['datetime'], a['lat'], a['lon'], a.get('alt', 0)) for a in anchors]
inter_points = []
for i, (a1, a2) in enumerate(zip(points[:], points[1:])):
t1 = a1[0]
t2 = a2[0]
num_image = anchors[i]['num_image']
delta = (t2-t1).total_seconds()/float(num_image+1)
inter_points.append(points[i]+(0.0,))
        for ii in range(num_image):
t = t1 + datetime.timedelta(seconds=(ii+1)*delta)
p = interpolate_lat_lon(points, t)
inter_points.append((t,)+p)
inter_points.append(points[-1]+(0,0,))
# get angles
bearings = [offset_bearing(compute_bearing(ll1[1], ll1[2], ll2[1], ll2[2]), angle_offset)
for ll1, ll2 in zip(inter_points, inter_points[1:])]
bearings.append(bearings[-1])
inter_points = [ (p[0], p[1], p[2], p[4], bearing) for p, bearing in zip(inter_points, bearings)]
return inter_points
def point(lat, lon, alt, datetime, num_image):
return {
'lat': lat,
'lon': lon,
'alt': alt,
'datetime': datetime,
'num_image': num_image
}
def test_run(image_path):
'''
Test run for images
'''
s = Sequence(image_path, check_exif=False)
file_list = s.get_file_list(image_path)
num_image = len(file_list)
t1 = datetime.datetime.strptime('2000_09_03_12_00_00', '%Y_%m_%d_%H_%M_%S')
t2 = datetime.datetime.strptime('2000_09_03_12_30_00', '%Y_%m_%d_%H_%M_%S')
p1 = point(0.5, 0.5, 0.2, t1, num_image-2)
p2 = point(0.55, 0.55, 0.0, t2, 0)
inter_points = interpolate_with_anchors([p1, p2], angle_offset=-90.0)
save_path = os.path.join(image_path, 'processed')
lib.io.mkdir_p(save_path)
assert(len(inter_points)==len(file_list))
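    # write the interpolated position, altitude, capture time and bearing back into each image's EXIF data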
for f, p in zip(file_list, inter_points):
meta = ExifEdit(f)
meta.add_lat_lon(p[1], p[2])
meta.add_altitude(p[3])
meta.add_date_time_original(p[0])
meta.add_orientation(1)
meta.add_direction(p[4])
meta.write()
|
[
"lib.geo.compute_bearing",
"lib.geo.interpolate_lat_lon",
"datetime.datetime.strptime",
"datetime.timedelta",
"os.path.join",
"lib.exifedit.ExifEdit",
"lib.sequence.Sequence"
] |
[((1821, 1859), 'lib.sequence.Sequence', 'Sequence', (['image_path'], {'check_exif': '(False)'}), '(image_path, check_exif=False)\n', (1829, 1859), False, 'from lib.sequence import Sequence\n'), ((1945, 2015), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2000_09_03_12_00_00"""', '"""%Y_%m_%d_%H_%M_%S"""'], {}), "('2000_09_03_12_00_00', '%Y_%m_%d_%H_%M_%S')\n", (1971, 2015), False, 'import datetime\n'), ((2025, 2095), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2000_09_03_12_30_00"""', '"""%Y_%m_%d_%H_%M_%S"""'], {}), "('2000_09_03_12_30_00', '%Y_%m_%d_%H_%M_%S')\n", (2051, 2095), False, 'import datetime\n'), ((2275, 2312), 'os.path.join', 'os.path.join', (['image_path', '"""processed"""'], {}), "(image_path, 'processed')\n", (2287, 2312), False, 'import os\n'), ((2452, 2463), 'lib.exifedit.ExifEdit', 'ExifEdit', (['f'], {}), '(f)\n', (2460, 2463), False, 'from lib.exifedit import ExifEdit\n'), ((1051, 1081), 'lib.geo.interpolate_lat_lon', 'interpolate_lat_lon', (['points', 't'], {}), '(points, t)\n', (1070, 1081), False, 'from lib.geo import interpolate_lat_lon, compute_bearing, offset_bearing\n'), ((1214, 1261), 'lib.geo.compute_bearing', 'compute_bearing', (['ll1[1]', 'll1[2]', 'll2[1]', 'll2[2]'], {}), '(ll1[1], ll1[2], ll2[1], ll2[2])\n', (1229, 1261), False, 'from lib.geo import interpolate_lat_lon, compute_bearing, offset_bearing\n'), ((994, 1038), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '((ii + 1) * delta)'}), '(seconds=(ii + 1) * delta)\n', (1012, 1038), False, 'import datetime\n')]
|
from glob import glob
from pathlib import Path
from typing import List, Any, Dict
import pytest
import requests
import json
from requests import Response
from mockserver_client.exceptions.mock_server_expectation_not_found_exception import (
MockServerExpectationNotFoundException,
)
from mockserver_client.exceptions.mock_server_json_content_mismatch_exception import (
MockServerJsonContentMismatchException,
)
from mockserver_client.mockserver_client import MockServerFriendlyClient
from mockserver_client.mockserver_verify_exception import MockServerVerifyException
def test_mock_server_from_file_multiple_calls_mismatch() -> None:
expectations_dir: Path = Path(__file__).parent.joinpath("./expectations")
requests_dir: Path = Path(__file__).parent.joinpath("./requests")
test_name = "test_mock_server"
mock_server_url = "http://mock-server:1080"
mock_client: MockServerFriendlyClient = MockServerFriendlyClient(
base_url=mock_server_url
)
mock_client.clear(f"/{test_name}/*")
mock_client.reset()
mock_client.expect_files_as_json_requests(
expectations_dir, path=f"/{test_name}/foo/1/merge", json_response_body={}
)
mock_client.expect_default()
http = requests.Session()
file_path: str
files: List[str] = sorted(
glob(str(requests_dir.joinpath("**/*.json")), recursive=True)
)
for file_path in files:
with open(file_path, "r") as file:
content: Dict[str, Any] = json.loads(file.read())
response: Response = http.post(
mock_server_url + "/" + test_name + "/foo/1/merge",
json=[content],
)
assert response.ok
with pytest.raises(MockServerVerifyException):
try:
mock_client.verify_expectations(test_name=test_name)
except MockServerVerifyException as e:
# there should be two expectations.
# One for the content not matching and one for the expectation not triggered
assert len(e.exceptions) == 2
json_content_mismatch_exceptions: List[
MockServerJsonContentMismatchException
] = [
e1
for e1 in e.exceptions
if isinstance(e1, MockServerJsonContentMismatchException)
]
assert len(json_content_mismatch_exceptions) == 1
expectation_not_found_exceptions: List[
MockServerExpectationNotFoundException
] = [
e1
for e1 in e.exceptions
if isinstance(e1, MockServerExpectationNotFoundException)
]
assert len(expectation_not_found_exceptions) == 1
print(str(e))
raise e
|
[
"pytest.raises",
"mockserver_client.mockserver_client.MockServerFriendlyClient",
"requests.Session",
"pathlib.Path"
] |
[((925, 975), 'mockserver_client.mockserver_client.MockServerFriendlyClient', 'MockServerFriendlyClient', ([], {'base_url': 'mock_server_url'}), '(base_url=mock_server_url)\n', (949, 975), False, 'from mockserver_client.mockserver_client import MockServerFriendlyClient\n'), ((1237, 1255), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1253, 1255), False, 'import requests\n'), ((1715, 1755), 'pytest.raises', 'pytest.raises', (['MockServerVerifyException'], {}), '(MockServerVerifyException)\n', (1728, 1755), False, 'import pytest\n'), ((677, 691), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (681, 691), False, 'from pathlib import Path\n'), ((751, 765), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (755, 765), False, 'from pathlib import Path\n')]
|
import unittest
from sim.battle import Battle
from data import dex
class TestAcrobatics(unittest.TestCase):
def test_acrobatics(self):
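        # pidgey holds no item here, so acrobatics hits at its boosted power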
b = Battle(debug=False, rng=False)
b.join(0, [{'species': 'charmander', 'moves': ['tackle']}])
b.join(1, [{'species': 'pidgey', 'moves': ['acrobatics']}])
b.choose(0, dex.Decision('move', 0))
b.choose(1, dex.Decision('move', 0))
b.do_turn()
charmander = b.sides[0].pokemon[0]
pidgey = b.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(charmander.hp, charmander.maxhp-76)
def test_acrobatics_noitem(self):
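        # here pidgey is given a pokeball to hold, so acrobatics loses its no-item power boost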
b = Battle(debug=False, rng=False)
b.join(0, [{'species': 'charmander', 'moves': ['tackle']}])
b.join(1, [{'species': 'pidgey', 'item': 'pokeball','moves': ['acrobatics']}])
b.choose(0, dex.Decision('move', 0))
b.choose(1, dex.Decision('move', 0))
b.do_turn()
charmander = b.sides[0].pokemon[0]
pidgey = b.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(charmander.hp, charmander.maxhp-39)
def runTest(self):
self.test_acrobatics()
self.test_acrobatics_noitem()
|
[
"data.dex.Decision",
"sim.battle.Battle"
] |
[((161, 191), 'sim.battle.Battle', 'Battle', ([], {'debug': '(False)', 'rng': '(False)'}), '(debug=False, rng=False)\n', (167, 191), False, 'from sim.battle import Battle\n'), ((675, 705), 'sim.battle.Battle', 'Battle', ([], {'debug': '(False)', 'rng': '(False)'}), '(debug=False, rng=False)\n', (681, 705), False, 'from sim.battle import Battle\n'), ((349, 372), 'data.dex.Decision', 'dex.Decision', (['"""move"""', '(0)'], {}), "('move', 0)\n", (361, 372), False, 'from data import dex\n'), ((394, 417), 'data.dex.Decision', 'dex.Decision', (['"""move"""', '(0)'], {}), "('move', 0)\n", (406, 417), False, 'from data import dex\n'), ((882, 905), 'data.dex.Decision', 'dex.Decision', (['"""move"""', '(0)'], {}), "('move', 0)\n", (894, 905), False, 'from data import dex\n'), ((927, 950), 'data.dex.Decision', 'dex.Decision', (['"""move"""', '(0)'], {}), "('move', 0)\n", (939, 950), False, 'from data import dex\n')]
|
import requests
from pdf4me.helper.json_converter import JsonConverter
from pdf4me.helper.pdf4me_exceptions import Pdf4meClientException, Pdf4meBackendException
from pdf4me.helper.response_checker import ResponseChecker
# from pdf4me.helper.token_generator import TokenGenerator
class CustomHttp(object):
def __init__(self, token, apiurl):
self.token = token
self.json_converter = JsonConverter()
self.url = "https://api.pdf4me.com/"
if apiurl is not None and len(apiurl) != 0:
self.url = apiurl
self.userAgent = "pdf4me-python/0.8.24"
def post_universal_object(self, universal_object, controller):
"""Sends a post request to the specified controller with the given
universal_object as a body.
:param universal_object: object to be sent
:type universal_object: object
:param controller: swagger controller
:type controller: str
:return: post response
"""
# prepare post request
request_url = self.url + controller
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Basic ' + self.token,
'User-Agent': self.userAgent
}
# convert body to json
body = self.json_converter.dump(element=universal_object)
# send request
res = requests.post(request_url, data=body, headers=headers)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
# read content from response
json_response = self.json_converter.load(res.text)
return json_response
def post_wrapper(self, octet_streams, values, controller):
"""Builds a post requests from the given parameters.
:param octet_streams: (key: file identifier, value: open(fileName, 'rb'))) pairs
:type octet_streams: list
:param values: (key: identifier of value, value: content of value) pairs
:type values: list
:param controller: swagger controller
:type controller: str
:return: post response
"""
# prepare post request
request_url = self.url + controller
header = {'Authorization': 'Basic ' + self.token, 'User-Agent': self.userAgent}
# build files
if octet_streams is not None and len(octet_streams) != 0:
files = {key: value for (key, value) in octet_streams}
else:
files = None
# build values
if len(values) != 0:
data = {key: value for (key, value) in values}
else:
data = None
# send request
if files is None:
if data is None:
raise Pdf4meClientException("Please provide at least one value or an octet-stream.")
else:
res = requests.post(request_url, data=data, headers=header)
else:
if data is None:
res = requests.post(request_url, files=files, headers=header)
else:
res = requests.post(request_url, files=files, data=data, headers=header)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
return res.content
def get_object(self, query_strings, controller):
"""Sends a get request to the specified controller with the given query strings.
:param query_strings: params to be sent
:type query_strings: str
:param controller: swagger controller
:type controller: str
:return: post response
"""
# prepare post request
request_url = self.url + controller
headers = {
'Authorization': 'Basic ' + self.token,
'User-Agent': self.userAgent
}
# send request
res = requests.get(request_url, data=query_strings, headers=headers)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
# read content from response
json_response = self.json_converter.load(res.text)
return json_response
def get_wrapper(self, query_strings, controller):
"""Sends a get request to the specified controller with the given
query string and returns a file
:param query_strings: params to be sent
:type query_strings: str
:param controller: swagger controller
:type controller: str
:return: file
"""
# prepare post request
request_url = self.url + controller
headers = {
'Authorization': 'Basic ' + self.token,
'User-Agent': self.userAgent
}
# send request
res = requests.get(request_url, data=query_strings, headers=headers)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
return res.content
def __check_status_code(self, response):
'''
        Checks whether the status code is either 200 or 204, otherwise throws a Pdf4meBackendException.
:param response: post response
:type response: requests.Response
:return: None
'''
status_code = response.status_code
status_reason = response.reason
if status_code == 500:
server_error = self.json_converter.load(response.text)['error_message']
trace_id = self.json_converter.load(response.text)['trace_id']
raise Pdf4meBackendException('HTTP 500 ' + status_reason + " : trace_id " + trace_id + " : " + server_error)
elif status_code != 200 and status_code != 204:
error = response.text
raise Pdf4meBackendException('HTTP ' + str(status_code) + ': ' + status_reason + " : " + error)
def __check_docLogs_for_error_messages(self, response):
'''
Checks whether the HTTP response's docLogs contain any error message, in case of an error
a Pdf4meBackendException is thrown.
:param response: post response
:type response: requests.Response
:return: None
'''
ResponseChecker().check_response_for_errors(response.text)
|
[
"pdf4me.helper.pdf4me_exceptions.Pdf4meClientException",
"pdf4me.helper.pdf4me_exceptions.Pdf4meBackendException",
"requests.get",
"requests.post",
"pdf4me.helper.response_checker.ResponseChecker",
"pdf4me.helper.json_converter.JsonConverter"
] |
[((407, 422), 'pdf4me.helper.json_converter.JsonConverter', 'JsonConverter', ([], {}), '()\n', (420, 422), False, 'from pdf4me.helper.json_converter import JsonConverter\n'), ((1412, 1466), 'requests.post', 'requests.post', (['request_url'], {'data': 'body', 'headers': 'headers'}), '(request_url, data=body, headers=headers)\n', (1425, 1466), False, 'import requests\n'), ((4018, 4080), 'requests.get', 'requests.get', (['request_url'], {'data': 'query_strings', 'headers': 'headers'}), '(request_url, data=query_strings, headers=headers)\n', (4030, 4080), False, 'import requests\n'), ((4978, 5040), 'requests.get', 'requests.get', (['request_url'], {'data': 'query_strings', 'headers': 'headers'}), '(request_url, data=query_strings, headers=headers)\n', (4990, 5040), False, 'import requests\n'), ((5798, 5904), 'pdf4me.helper.pdf4me_exceptions.Pdf4meBackendException', 'Pdf4meBackendException', (["('HTTP 500 ' + status_reason + ' : trace_id ' + trace_id + ' : ' + server_error\n )"], {}), "('HTTP 500 ' + status_reason + ' : trace_id ' +\n trace_id + ' : ' + server_error)\n", (5820, 5904), False, 'from pdf4me.helper.pdf4me_exceptions import Pdf4meClientException, Pdf4meBackendException\n'), ((2844, 2922), 'pdf4me.helper.pdf4me_exceptions.Pdf4meClientException', 'Pdf4meClientException', (['"""Please provide at least one value or an octet-stream."""'], {}), "('Please provide at least one value or an octet-stream.')\n", (2865, 2922), False, 'from pdf4me.helper.pdf4me_exceptions import Pdf4meClientException, Pdf4meBackendException\n'), ((2963, 3016), 'requests.post', 'requests.post', (['request_url'], {'data': 'data', 'headers': 'header'}), '(request_url, data=data, headers=header)\n', (2976, 3016), False, 'import requests\n'), ((3082, 3137), 'requests.post', 'requests.post', (['request_url'], {'files': 'files', 'headers': 'header'}), '(request_url, files=files, headers=header)\n', (3095, 3137), False, 'import requests\n'), ((3178, 3244), 'requests.post', 'requests.post', (['request_url'], {'files': 'files', 'data': 'data', 'headers': 'header'}), '(request_url, files=files, data=data, headers=header)\n', (3191, 3244), False, 'import requests\n'), ((6439, 6456), 'pdf4me.helper.response_checker.ResponseChecker', 'ResponseChecker', ([], {}), '()\n', (6454, 6456), False, 'from pdf4me.helper.response_checker import ResponseChecker\n')]
|
# General
import numpy as np
import random
import argparse
import json
import commentjson
import joblib
import os
import pathlib
from collections import OrderedDict
# Pytorch
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
# Optuna
import optuna
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
from . import SubGNN as md
from SubGNN import config
def parse_arguments():
"""
Read in the config file specifying all of the parameters
"""
parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
parser.add_argument("-config_path", type=str, default=None, help="Load config file")
args = parser.parse_args()
return args
def read_json(fname):
"""
Read in the json file specified by 'fname'
"""
with open(fname, "rt") as handle:
return commentjson.load(handle, object_hook=OrderedDict)
def get_optuna_suggest(param_dict, name, trial):
"""
Returns a suggested value for the hyperparameter specified by 'name' from the range
of values in 'param_dict'
name: string specifying hyperparameter
trial: optuna trial
param_dict: dictionary containing information about the hyperparameter (range of
values & type of sampler)
e.g.{
"type" : "suggest_categorical",
"args" : [[ 64, 128]]
}
"""
module_name = param_dict["type"] # e.g. suggest_categorical, suggest_float
args = [name]
args.extend(
param_dict["args"]
) # resulting list will look something like this ['batch_size', [ 64, 128]]
if "kwargs" in param_dict:
kwargs = dict(param_dict["kwargs"])
return getattr(trial, module_name)(*args, **kwargs)
else:
return getattr(trial, module_name)(*args)
def get_hyperparams_optuna(run_config, trial):
"""
Converts the fixed and variable hyperparameters in the run config to a dictionary of
the final hyperparameters
Returns: hyp_fix - dictionary where key is the hyperparameter name (e.g. batch_size)
and value is the hyperparameter value
"""
# initialize the dict with the fixed hyperparameters
hyp_fix = dict(run_config["hyperparams_fix"])
# update the dict with variable value hyperparameters by sampling a hyperparameter
# value from the range specified in the run_config
hyp_optuna = {
k: get_optuna_suggest(run_config["hyperparams_optuna"][k], k, trial)
for k in dict(run_config["hyperparams_optuna"]).keys()
}
hyp_fix.update(hyp_optuna)
return hyp_fix
def build_model(run_config, trial=None):
"""
Creates SubGNN from the hyperparameters specified in the run config
"""
# get hyperparameters for the current trial
hyperparameters = get_hyperparams_optuna(run_config, trial)
# Set seeds for reproducibility
torch.manual_seed(hyperparameters["seed"])
np.random.seed(hyperparameters["seed"])
torch.cuda.manual_seed(hyperparameters["seed"])
torch.cuda.manual_seed_all(hyperparameters["seed"])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# initialize SubGNN
model = md.SubGNN_Chem(
hyperparameters,
run_config["graph_path"],
run_config["subgraphs_path"],
run_config["embedding_path"],
run_config["similarities_path"],
run_config["shortest_paths_path"],
run_config["degree_sequence_path"],
run_config["ego_graph_path"],
)
return model, hyperparameters
def build_trainer(run_config, hyperparameters, trial=None):
"""
Set up optuna trainer
"""
if "progress_bar_refresh_rate" in hyperparameters:
p_refresh = hyperparameters["progress_bar_refresh_rate"]
else:
p_refresh = 5
# set epochs, gpus, gradient clipping, etc.
# if 'no_gpu' in run config, then use CPU
trainer_kwargs = {
"max_epochs": hyperparameters["max_epochs"],
"gpus": 0 if "no_gpu" in run_config else 1,
"num_sanity_val_steps": 0,
"progress_bar_refresh_rate": p_refresh,
"gradient_clip_val": hyperparameters["grad_clip"],
}
# set auto learning rate finder param
if "auto_lr_find" in hyperparameters and hyperparameters["auto_lr_find"]:
trainer_kwargs["auto_lr_find"] = hyperparameters["auto_lr_find"]
# Create tensorboard logger
lgdir = os.path.join(run_config["tb"]["dir_full"], run_config["tb"]["name"])
if not os.path.exists(lgdir):
os.makedirs(lgdir)
logger = TensorBoardLogger(
run_config["tb"]["dir_full"],
name=run_config["tb"]["name"],
version="version_" + str(random.randint(0, 10000000)),
)
if not os.path.exists(logger.log_dir):
os.makedirs(logger.log_dir)
print("Tensorboard logging at ", logger.log_dir)
trainer_kwargs["logger"] = logger
# Save top three model checkpoints
trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
filepath=os.path.join(
logger.log_dir, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"
),
save_top_k=3,
verbose=True,
monitor=run_config["optuna"]["monitor_metric"],
mode="max",
)
# if we use pruning, use the pytorch lightning pruning callback
if run_config["optuna"]["pruning"]:
trainer_kwargs["early_stop_callback"] = PyTorchLightningPruningCallback(
trial, monitor=run_config["optuna"]["monitor_metric"]
)
trainer = pl.Trainer(**trainer_kwargs)
return trainer, trainer_kwargs, logger.log_dir
def train_model(run_config, trial=None):
"""
Train a single model whose hyperparameters are specified in the run config
Returns the max (or min) metric specified by 'monitor_metric' in the run config
"""
# get model and hyperparameter dict
model, hyperparameters = build_model(run_config, trial)
# build optuna trainer
trainer, trainer_kwargs, results_path = build_trainer(
run_config, hyperparameters, trial
)
# dump hyperparameters to results dir
hparam_file = open(os.path.join(results_path, "hyperparams.json"), "w")
hparam_file.write(json.dumps(hyperparameters, indent=4))
hparam_file.close()
# dump trainer args to results dir
tkwarg_file = open(os.path.join(results_path, "trainer_kwargs.json"), "w")
pop_keys = [
key
for key in ["logger", "profiler", "early_stop_callback", "checkpoint_callback"]
if key in trainer_kwargs.keys()
]
[trainer_kwargs.pop(key) for key in pop_keys]
tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
tkwarg_file.close()
# train the model
trainer.fit(model)
# write results to the results dir
if results_path is not None:
hparam_file = open(os.path.join(results_path, "final_metric_scores.json"), "w")
results_serializable = {k: float(v) for k, v in model.metric_scores[-1].items()}
hparam_file.write(json.dumps(results_serializable, indent=4))
hparam_file.close()
# return the max (or min) metric specified by 'monitor_metric' in the run config
all_scores = [
score[run_config["optuna"]["monitor_metric"]].numpy()
for score in model.metric_scores
]
if run_config["optuna"]["opt_direction"] == "maximize":
return np.max(all_scores)
else:
return np.min(all_scores)
def main():
"""
Perform an optuna run according to the hyperparameters and directory locations
specified in 'config_path'
"""
torch.autograd.set_detect_anomaly(True)
args = parse_arguments()
# read in config file
run_config = read_json(args.config_path)
# Set paths to data
task = run_config["data"]["task"]
# paths to subgraphs, edge list, and shortest paths between all nodes in the graph
run_config["subgraphs_path"] = os.path.join(task, "subgraphs.pth")
run_config["graph_path"] = os.path.join(task, "edge_list.txt")
run_config["shortest_paths_path"] = os.path.join(task, "shortest_path_matrix.npy")
run_config["degree_sequence_path"] = os.path.join(task, "degree_sequence.txt")
run_config["ego_graph_path"] = os.path.join(task, "ego_graphs.txt")
# directory where similarity calculations will be stored
run_config["similarities_path"] = os.path.join(task, "similarities/")
# get location of node embeddings
run_config["embedding_path"] = os.path.join(task, "atom_features.pth")
# create a tensorboard directory in the folder specified by dir in the PROJECT ROOT
# folder
if "local" in run_config["tb"] and run_config["tb"]["local"]:
run_config["tb"]["dir_full"] = run_config["tb"]["dir"]
else:
run_config["tb"]["dir_full"] = os.path.join(
config.PROJECT_ROOT, run_config["tb"]["dir"]
)
ntrials = run_config["optuna"]["opt_n_trials"]
print(f"Running {ntrials} Trials of optuna")
if run_config["optuna"]["pruning"]:
pruner = optuna.pruners.MedianPruner()
else:
pruner = None
# the complete study path is the tensorboard directory + the study name
run_config["study_path"] = os.path.join(
run_config["tb"]["dir_full"], run_config["tb"]["name"]
)
print("Logging to ", run_config["study_path"])
pathlib.Path(run_config["study_path"]).mkdir(parents=True, exist_ok=True)
# get database file
db_file = os.path.join(run_config["study_path"], "optuna_study_sqlite.db")
# specify sampler
if (
run_config["optuna"]["sampler"] == "grid"
and "grid_search_space" in run_config["optuna"]
):
sampler = optuna.samplers.GridSampler(run_config["optuna"]["grid_search_space"])
elif run_config["optuna"]["sampler"] == "tpe":
sampler = optuna.samplers.TPESampler()
elif run_config["optuna"]["sampler"] == "random":
sampler = optuna.samplers.RandomSampler()
# create an optuna study with the specified sampler, pruner, direction (e.g.
# maximize) A SQLlite database is used to keep track of results Will load in
# existing study if one exists
study = optuna.create_study(
direction=run_config["optuna"]["opt_direction"],
sampler=sampler,
pruner=pruner,
storage="sqlite:///" + db_file,
study_name=run_config["study_path"],
load_if_exists=True,
)
study.optimize(
lambda trial: train_model(run_config, trial),
n_trials=run_config["optuna"]["opt_n_trials"],
n_jobs=run_config["optuna"]["opt_n_cores"],
)
optuna_results_path = os.path.join(run_config["study_path"], "optuna_study.pkl")
print("Saving Study Results to", optuna_results_path)
joblib.dump(study, optuna_results_path)
print(study.best_params)
if __name__ == "__main__":
main()
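# Illustrative sketch (an assumption, not part of the original script): the run config
# read by main() needs at least the keys accessed above, so a minimal JSON looks
# roughly like
#
# {
#     "data":   {"task": "path/to/task_dir"},
#     "tb":     {"dir": "tensorboard_logs", "name": "my_study", "local": false},
#     "optuna": {
#         "opt_n_trials": 20,
#         "opt_n_cores": 1,
#         "opt_direction": "maximize",
#         "monitor_metric": "val_micro_f1",
#         "pruning": true,
#         "sampler": "tpe"
#     }
# }
#
# plus whatever build_model() and build_trainer() read from it; when "sampler" is
# "grid", a "grid_search_space" entry is also required.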
|
[
"pytorch_lightning.Trainer",
"numpy.random.seed",
"argparse.ArgumentParser",
"joblib.dump",
"json.dumps",
"pathlib.Path",
"torch.autograd.set_detect_anomaly",
"optuna.integration.PyTorchLightningPruningCallback",
"os.path.join",
"optuna.samplers.TPESampler",
"random.randint",
"os.path.exists",
"numpy.max",
"optuna.samplers.GridSampler",
"torch.manual_seed",
"torch.cuda.manual_seed",
"optuna.samplers.RandomSampler",
"numpy.min",
"optuna.pruners.MedianPruner",
"os.makedirs",
"torch.cuda.manual_seed_all",
"commentjson.load",
"optuna.create_study"
] |
[((602, 666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Learn subgraph embeddings"""'}), "(description='Learn subgraph embeddings')\n", (625, 666), False, 'import argparse\n'), ((2974, 3016), 'torch.manual_seed', 'torch.manual_seed', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (2991, 3016), False, 'import torch\n'), ((3021, 3060), 'numpy.random.seed', 'np.random.seed', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (3035, 3060), True, 'import numpy as np\n'), ((3065, 3112), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (3087, 3112), False, 'import torch\n'), ((3117, 3168), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (3143, 3168), False, 'import torch\n'), ((4519, 4587), 'os.path.join', 'os.path.join', (["run_config['tb']['dir_full']", "run_config['tb']['name']"], {}), "(run_config['tb']['dir_full'], run_config['tb']['name'])\n", (4531, 4587), False, 'import os\n'), ((5634, 5662), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {}), '(**trainer_kwargs)\n', (5644, 5662), True, 'import pytorch_lightning as pl\n'), ((7689, 7728), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (7722, 7728), False, 'import torch\n'), ((8016, 8051), 'os.path.join', 'os.path.join', (['task', '"""subgraphs.pth"""'], {}), "(task, 'subgraphs.pth')\n", (8028, 8051), False, 'import os\n'), ((8083, 8118), 'os.path.join', 'os.path.join', (['task', '"""edge_list.txt"""'], {}), "(task, 'edge_list.txt')\n", (8095, 8118), False, 'import os\n'), ((8159, 8205), 'os.path.join', 'os.path.join', (['task', '"""shortest_path_matrix.npy"""'], {}), "(task, 'shortest_path_matrix.npy')\n", (8171, 8205), False, 'import os\n'), ((8247, 8288), 'os.path.join', 'os.path.join', (['task', '"""degree_sequence.txt"""'], {}), "(task, 'degree_sequence.txt')\n", (8259, 8288), False, 'import os\n'), ((8324, 8360), 'os.path.join', 'os.path.join', (['task', '"""ego_graphs.txt"""'], {}), "(task, 'ego_graphs.txt')\n", (8336, 8360), False, 'import os\n'), ((8461, 8496), 'os.path.join', 'os.path.join', (['task', '"""similarities/"""'], {}), "(task, 'similarities/')\n", (8473, 8496), False, 'import os\n'), ((8571, 8610), 'os.path.join', 'os.path.join', (['task', '"""atom_features.pth"""'], {}), "(task, 'atom_features.pth')\n", (8583, 8610), False, 'import os\n'), ((9300, 9368), 'os.path.join', 'os.path.join', (["run_config['tb']['dir_full']", "run_config['tb']['name']"], {}), "(run_config['tb']['dir_full'], run_config['tb']['name'])\n", (9312, 9368), False, 'import os\n'), ((9551, 9615), 'os.path.join', 'os.path.join', (["run_config['study_path']", '"""optuna_study_sqlite.db"""'], {}), "(run_config['study_path'], 'optuna_study_sqlite.db')\n", (9563, 9615), False, 'import os\n'), ((10262, 10460), 'optuna.create_study', 'optuna.create_study', ([], {'direction': "run_config['optuna']['opt_direction']", 'sampler': 'sampler', 'pruner': 'pruner', 'storage': "('sqlite:///' + db_file)", 'study_name': "run_config['study_path']", 'load_if_exists': '(True)'}), "(direction=run_config['optuna']['opt_direction'],\n sampler=sampler, pruner=pruner, storage='sqlite:///' + db_file,\n study_name=run_config['study_path'], load_if_exists=True)\n", (10281, 10460), False, 'import optuna\n'), ((10723, 10781), 'os.path.join', 'os.path.join', (["run_config['study_path']", 
'"""optuna_study.pkl"""'], {}), "(run_config['study_path'], 'optuna_study.pkl')\n", (10735, 10781), False, 'import os\n'), ((10844, 10883), 'joblib.dump', 'joblib.dump', (['study', 'optuna_results_path'], {}), '(study, optuna_results_path)\n', (10855, 10883), False, 'import joblib\n'), ((943, 992), 'commentjson.load', 'commentjson.load', (['handle'], {'object_hook': 'OrderedDict'}), '(handle, object_hook=OrderedDict)\n', (959, 992), False, 'import commentjson\n'), ((4599, 4620), 'os.path.exists', 'os.path.exists', (['lgdir'], {}), '(lgdir)\n', (4613, 4620), False, 'import os\n'), ((4630, 4648), 'os.makedirs', 'os.makedirs', (['lgdir'], {}), '(lgdir)\n', (4641, 4648), False, 'import os\n'), ((4838, 4868), 'os.path.exists', 'os.path.exists', (['logger.log_dir'], {}), '(logger.log_dir)\n', (4852, 4868), False, 'import os\n'), ((4878, 4905), 'os.makedirs', 'os.makedirs', (['logger.log_dir'], {}), '(logger.log_dir)\n', (4889, 4905), False, 'import os\n'), ((5510, 5601), 'optuna.integration.PyTorchLightningPruningCallback', 'PyTorchLightningPruningCallback', (['trial'], {'monitor': "run_config['optuna']['monitor_metric']"}), "(trial, monitor=run_config['optuna'][\n 'monitor_metric'])\n", (5541, 5601), False, 'from optuna.integration import PyTorchLightningPruningCallback\n'), ((6241, 6287), 'os.path.join', 'os.path.join', (['results_path', '"""hyperparams.json"""'], {}), "(results_path, 'hyperparams.json')\n", (6253, 6287), False, 'import os\n'), ((6316, 6353), 'json.dumps', 'json.dumps', (['hyperparameters'], {'indent': '(4)'}), '(hyperparameters, indent=4)\n', (6326, 6353), False, 'import json\n'), ((6442, 6491), 'os.path.join', 'os.path.join', (['results_path', '"""trainer_kwargs.json"""'], {}), "(results_path, 'trainer_kwargs.json')\n", (6454, 6491), False, 'import os\n'), ((6733, 6769), 'json.dumps', 'json.dumps', (['trainer_kwargs'], {'indent': '(4)'}), '(trainer_kwargs, indent=4)\n', (6743, 6769), False, 'import json\n'), ((7478, 7496), 'numpy.max', 'np.max', (['all_scores'], {}), '(all_scores)\n', (7484, 7496), True, 'import numpy as np\n'), ((7522, 7540), 'numpy.min', 'np.min', (['all_scores'], {}), '(all_scores)\n', (7528, 7540), True, 'import numpy as np\n'), ((8891, 8949), 'os.path.join', 'os.path.join', (['config.PROJECT_ROOT', "run_config['tb']['dir']"], {}), "(config.PROJECT_ROOT, run_config['tb']['dir'])\n", (8903, 8949), False, 'import os\n'), ((9130, 9159), 'optuna.pruners.MedianPruner', 'optuna.pruners.MedianPruner', ([], {}), '()\n', (9157, 9159), False, 'import optuna\n'), ((9779, 9849), 'optuna.samplers.GridSampler', 'optuna.samplers.GridSampler', (["run_config['optuna']['grid_search_space']"], {}), "(run_config['optuna']['grid_search_space'])\n", (9806, 9849), False, 'import optuna\n'), ((5115, 5207), 'os.path.join', 'os.path.join', (['logger.log_dir', '"""{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"""'], {}), "(logger.log_dir,\n '{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}')\n", (5127, 5207), False, 'import os\n'), ((6941, 6995), 'os.path.join', 'os.path.join', (['results_path', '"""final_metric_scores.json"""'], {}), "(results_path, 'final_metric_scores.json')\n", (6953, 6995), False, 'import os\n'), ((7117, 7159), 'json.dumps', 'json.dumps', (['results_serializable'], {'indent': '(4)'}), '(results_serializable, indent=4)\n', (7127, 7159), False, 'import json\n'), ((9438, 9476), 'pathlib.Path', 'pathlib.Path', (["run_config['study_path']"], {}), "(run_config['study_path'])\n", (9450, 9476), False, 'import pathlib\n'), ((9919, 9947), 
'optuna.samplers.TPESampler', 'optuna.samplers.TPESampler', ([], {}), '()\n', (9945, 9947), False, 'import optuna\n'), ((10020, 10051), 'optuna.samplers.RandomSampler', 'optuna.samplers.RandomSampler', ([], {}), '()\n', (10049, 10051), False, 'import optuna\n'), ((4791, 4818), 'random.randint', 'random.randint', (['(0)', '(10000000)'], {}), '(0, 10000000)\n', (4805, 4818), False, 'import random\n')]
|
import math
import pandas as pd
from transformers import Trainer, TrainingArguments
from .config import RecconEmotionEntailmentConfig
from .data_class import RecconEmotionEntailmentArguments
from .modeling import RecconEmotionEntailmentModel
from .tokenization import RecconEmotionEntailmentTokenizer
from .utils import (
RecconEmotionEntailmentData,
convert_df_to_dataset,
parse_args_and_load_config,
)
def train_model(train_config: RecconEmotionEntailmentArguments):
"""
Method for training RecconEmotionEntailmentModel.
Args:
train_config (:obj:`RecconEmotionEntailmentArguments`):
            RecconEmotionEntailmentArguments config loaded from the config file.
Example::
import json
from sgnlp.models.emotion_entailment import train
from sgnlp.models.emotion_entailment.utils import parse_args_and_load_config
config = parse_args_and_load_config('config/emotion_entailment_config.json')
train(config)
"""
config = RecconEmotionEntailmentConfig.from_pretrained(train_config.model_name)
tokenizer = RecconEmotionEntailmentTokenizer.from_pretrained(train_config.model_name)
model = RecconEmotionEntailmentModel.from_pretrained(train_config.model_name, config=config)
train_df = pd.read_csv(train_config.x_train_path)
val_df = pd.read_csv(train_config.x_valid_path)
train_dataset = convert_df_to_dataset(
df=train_df, max_seq_length=train_config.max_seq_length, tokenizer=tokenizer
)
val_dataset = convert_df_to_dataset(
df=val_df, max_seq_length=train_config.max_seq_length, tokenizer=tokenizer
)
train_config.len = len(train_df)
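    # derive eval_steps (roughly one evaluation pass per epoch) and warmup_steps
    # from the dataset size and the configured training arguments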
train_config.train_args["eval_steps"] = (
train_config.len / train_config.train_args["per_device_train_batch_size"]
)
train_config.train_args["warmup_steps"] = math.ceil(
(
train_config.len
// train_config.train_args["gradient_accumulation_steps"]
* train_config.train_args["num_train_epochs"]
)
* train_config.train_args["warmup_ratio"]
)
train_args = TrainingArguments(**train_config.train_args)
trainer = Trainer(
model=model,
args=train_args,
train_dataset=RecconEmotionEntailmentData(train_dataset),
eval_dataset=RecconEmotionEntailmentData(val_dataset),
)
trainer.train()
trainer.save_model()
if __name__ == "__main__":
cfg = parse_args_and_load_config()
train_model(cfg)
|
[
"pandas.read_csv",
"transformers.TrainingArguments",
"math.ceil"
] |
[((1280, 1318), 'pandas.read_csv', 'pd.read_csv', (['train_config.x_train_path'], {}), '(train_config.x_train_path)\n', (1291, 1318), True, 'import pandas as pd\n'), ((1332, 1370), 'pandas.read_csv', 'pd.read_csv', (['train_config.x_valid_path'], {}), '(train_config.x_valid_path)\n', (1343, 1370), True, 'import pandas as pd\n'), ((1857, 2040), 'math.ceil', 'math.ceil', (["(train_config.len // train_config.train_args['gradient_accumulation_steps'] *\n train_config.train_args['num_train_epochs'] * train_config.train_args[\n 'warmup_ratio'])"], {}), "(train_config.len // train_config.train_args[\n 'gradient_accumulation_steps'] * train_config.train_args[\n 'num_train_epochs'] * train_config.train_args['warmup_ratio'])\n", (1866, 2040), False, 'import math\n'), ((2131, 2175), 'transformers.TrainingArguments', 'TrainingArguments', ([], {}), '(**train_config.train_args)\n', (2148, 2175), False, 'from transformers import Trainer, TrainingArguments\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 18:49:46 2021
@author: wanderer
"""
### Housekeeping ###
import pandas as pd
import pandas_datareader.data as web
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
sns.set_style('white', {"xtick.major.size": 2, "ytick.major.size": 2})
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71","#f4cae4"]
sns.set_palette(sns.color_palette(flatui, 7))
from dateutil.relativedelta import relativedelta
save_loc = '~/Desktop/hu'
redownload = True
if redownload:
f1 = 'USREC' # recession data from FRED
f2 = 'M2SL' # M2 from FRED
start = pd.to_datetime('1959-12-01')
end = pd.to_datetime('2020-12-31')
M2 = web.DataReader([f2], 'fred', start, end)
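    # month-over-month growth of M2, scaled by 12 as a simple annualization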
data_line = M2.pct_change()
data_line = data_line.apply(lambda x: 12*x)
data_shade = web.DataReader([f1], 'fred', start, end)
data = data_shade.join(data_line, how='outer').dropna()
data.to_pickle(save_loc + r'/M2SL.pkl')
data = pd.read_pickle(save_loc + r'/M2SL.pkl')
# recessions are marked as 1 in the data
recs = data.query('USREC==1')
plot_cols = ['M2SL']
mpl.rcParams['font.family'] = 'Helvetica Neue'
fig, axes = plt.subplots(1, 1, figsize=(12, 6), sharex=True)
data[plot_cols].plot(subplots=True, ax=axes, marker='o', ms=3)
col = plot_cols
ax = axes
for month in recs.index:
    ax.axvspan(month, month + relativedelta(months=+1),
color=sns.xkcd_rgb['grey'], alpha=0.5)
# lets add horizontal zero lines
ax.axhline(0, color='k', linestyle='-', linewidth=1)
# add titles
ax.set_title('Monthly ' + 'M2 percentage change' +
' \nRecessions Shaded Gray',
fontsize=14,
fontweight='demi')
# add axis labels
ax.set_ylabel('% change\n(Annualized)', fontsize=12, fontweight='demi')
ax.set_xlabel('Date', fontsize=12, fontweight='demi')
# upgrade axis tick labels
yticks = ax.get_yticks()
ax.yaxis.set_major_locator(mticker.FixedLocator(yticks))
ax.set_yticklabels(['{:3.1f}%'.format(x * 100) for x in yticks])
dates_rng = pd.date_range(data.index[0], data.index[-1], freq='24M')
plt.xticks(dates_rng, [dtz.strftime('%Y-%m') for dtz in dates_rng],
rotation=45)
# bold up tick axes
ax.tick_params(axis='both', which='major', labelsize=11)
# add cool legend
ax.legend(loc='upper left', fontsize=11, labels=['M2'],
frameon=True).get_frame().set_edgecolor('blue')
plt.savefig('M2SL_recession.png', dpi=300)
|
[
"seaborn.set_style",
"pandas.date_range",
"pandas_datareader.data.DataReader",
"dateutil.relativedelta.relativedelta",
"matplotlib.ticker.FixedLocator",
"pandas.to_datetime",
"seaborn.color_palette",
"pandas.read_pickle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((306, 376), 'seaborn.set_style', 'sns.set_style', (['"""white"""', "{'xtick.major.size': 2, 'ytick.major.size': 2}"], {}), "('white', {'xtick.major.size': 2, 'ytick.major.size': 2})\n", (319, 376), True, 'import seaborn as sns\n'), ((1095, 1133), 'pandas.read_pickle', 'pd.read_pickle', (["(save_loc + '/M2SL.pkl')"], {}), "(save_loc + '/M2SL.pkl')\n", (1109, 1133), True, 'import pandas as pd\n'), ((1290, 1338), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 6)', 'sharex': '(True)'}), '(1, 1, figsize=(12, 6), sharex=True)\n', (1302, 1338), True, 'import matplotlib.pyplot as plt\n'), ((2145, 2201), 'pandas.date_range', 'pd.date_range', (['data.index[0]', 'data.index[-1]'], {'freq': '"""24M"""'}), "(data.index[0], data.index[-1], freq='24M')\n", (2158, 2201), True, 'import pandas as pd\n'), ((2509, 2551), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""M2SL_recession.png"""'], {'dpi': '(300)'}), "('M2SL_recession.png', dpi=300)\n", (2520, 2551), True, 'import matplotlib.pyplot as plt\n'), ((479, 507), 'seaborn.color_palette', 'sns.color_palette', (['flatui', '(7)'], {}), '(flatui, 7)\n', (496, 507), True, 'import seaborn as sns\n'), ((711, 739), 'pandas.to_datetime', 'pd.to_datetime', (['"""1959-12-01"""'], {}), "('1959-12-01')\n", (725, 739), True, 'import pandas as pd\n'), ((750, 778), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020-12-31"""'], {}), "('2020-12-31')\n", (764, 778), True, 'import pandas as pd\n'), ((793, 833), 'pandas_datareader.data.DataReader', 'web.DataReader', (['[f2]', '"""fred"""', 'start', 'end'], {}), "([f2], 'fred', start, end)\n", (807, 833), True, 'import pandas_datareader.data as web\n'), ((936, 976), 'pandas_datareader.data.DataReader', 'web.DataReader', (['[f1]', '"""fred"""', 'start', 'end'], {}), "([f1], 'fred', start, end)\n", (950, 976), True, 'import pandas_datareader.data as web\n'), ((2039, 2067), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['yticks'], {}), '(yticks)\n', (2059, 2067), True, 'import matplotlib.ticker as mticker\n'), ((1482, 1506), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(+1)'}), '(months=+1)\n', (1495, 1506), False, 'from dateutil.relativedelta import relativedelta\n')]
|
import numpy as np
import est_dir
def test_1():
"""
Test for compute_forward() - check for flag=True.
"""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
region = 1
step = 0.17741338024633116
forward_tol = 1000000
no_vars = 10
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track = np.array([[0, f_old], [step, f_new]])
track, count_func_evals, flag = (est_dir.compute_forward
(step, const_forward, forward_tol, track,
centre_point, beta,
f, func_args))
assert(f_old > f_new)
assert(count_func_evals == len(track) - 2)
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
if j < len(track) - 1:
assert(track[j][1] < track[j - 1][1])
else:
assert(track[j][1] > track[j - 1][1])
def test_2():
"""
Test for compute_forward() - check that when flag=False, track is returned.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 1
forward_tol = 100000
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track = np.array([[0, f_old], [step, f_new]])
test_track, count_func_evals, flag = (est_dir.compute_forward
(step, const_forward, forward_tol,
track, centre_point, beta, f,
func_args))
assert(f_old > f_new)
assert(flag == False)
assert(count_func_evals > 0)
for j in range(len(test_track)):
assert(test_track[j, 0] < forward_tol)
if j >= 1:
assert(test_track[j, 1] < test_track[j - 1, 1])
assert(test_track[j, 0] * const_forward > forward_tol)
def test_3():
"""
Test for forward_tracking - flag=True and f_new >= track[-2][1]
"""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.05
forward_tol = 1000000
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(len(track) - 1 == total_func_evals)
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(step, 3))
assert(flag == True)
for j in range(2, len(track)):
step = step * 2
assert(np.round(track[j][0], 3) == step)
if j == (len(track) - 1):
assert(track[j][1] > track[j - 1][1])
else:
assert(track[j - 1][1] > track[j][1])
def test_4():
"""
Test for forward_tracking - forward_tol not met and f_new < track[-2][1].
"""
np.random.seed(25)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 10)
t = 0.005
forward_tol = 10000
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, t, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(flag == True)
for j in range(1, len(track)):
if j == (len(track) - 1):
assert(track[j][1] > track[j-1][1])
else:
assert(track[j-1][1] > track[j][1])
def test_5():
"""
Test for forward_tracking - forward_tol not met initially, f_new <
track[-2][1] and eventually forward_tol is met.
"""
np.random.seed(25)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 10)
t = 0.005
forward_tol = 10
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, t, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(flag == False)
for j in range(1, len(track)):
assert(track[j-1][1] > track[j][1])
def test_6():
"""
Test for forward_tracking - forward_tol met.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 0.5
forward_tol = 1.5
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(flag == False)
assert(track[2][1] < track[1][1] < track[0][1])
assert(total_func_evals == 1)
def test_7():
"""
Test for compute_backward - check that when flag=True, track is updated.
"""
np.random.seed(90)
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 1, (m, ))
centre_point = np.random.uniform(0, 1, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 0.1)
step = 0.001
back_tol = 0.000001
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, f_old], [step, f_new]])
track, total_func_evals, flag = (est_dir.compute_backward
(step, const_back, back_tol, track,
centre_point, beta, f, func_args))
assert(total_func_evals == len(track) - 2)
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_back
if j < len(track) - 1:
assert(track[j][1] < track[j-1][1])
else:
assert(track[j][1] > track[j-1][1])
def test_8():
"""
Test for compute_backward - check that when flag=False,
original track is returned.
"""
np.random.seed(90)
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 1, (m, ))
centre_point = np.random.uniform(0, 1, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 0.1)
step = 0.1
back_tol = 0.075
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, f_old], [step, f_new]])
track_new, total_func_evals, flag = (est_dir.compute_backward
(step, const_back, back_tol, track,
centre_point, beta, f, func_args))
assert(np.all(track == track_new))
assert(flag == False)
assert(total_func_evals == 0)
def test_9():
"""
Test for backward_tracking - back_tol is met.
"""
np.random.seed(32964)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
t = 1
back_tol = 1
beta = np.array([200, 200])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, count_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(track.shape == (2, m))
assert(track[0][0] == 0)
assert(track[1][0] == t)
assert(track[1][0] < track[1][1])
assert(count_func_evals == 0)
def test_10():
"""
Test for backward_tracking - back tol is not met and f_new >
track[-2][1].
"""
np.random.seed(32964)
n = 6
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
t = 97.688932389756
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
for j in range(1, len(track)):
assert(np.round(track[j][0], 4) == np.round(t, 4))
t = t / 2
assert(np.min(track[:, 1]) < track[1][0])
def test_11():
"""
Test for backward_tracking - back tol is not met and f_new < track[-2][1]
"""
np.random.seed(329998)
n = 20
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 10, (m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 1000)
t = 17001.993794080016
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(np.min(track[:, 1]) < track[:, 1][0])
def test_12():
"""
Test for backward_tracking - back tol is not initially met, f_new <
    track[-2][1] and eventually back tol is met.
"""
np.random.seed(329998)
n = 20
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 10, (m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 1000)
t = 17001.993794080016
back_tol = 1
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(np.min(track[:, 1]) < track[:, 1][0])
def test_13():
"""Test for compute_coeffs"""
track_y = np.array([100, 200, 50])
track_t = np.array([0, 1, 0.5])
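    # the "intercept" column below is track_y[0] repeated rather than ones; rescaling a
    # constant column leaves the t and t**2 coefficients (and hence the vertex)
    # unchanged, provided track_y[0] != 0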
design_matrix_step = np.vstack((np.repeat(track_y[0], len(track_t)),
np.array(track_t),
np.array(track_t) ** 2)).T
assert(np.all(design_matrix_step[0, :] == np.array([100, 0, 0])))
assert(np.all(design_matrix_step[1, :] == np.array([100, 1, 1])))
assert(np.all(design_matrix_step[2, :] == np.array([100, 0.5, 0.25])))
OLS = (np.linalg.inv(design_matrix_step.T @ design_matrix_step) @
design_matrix_step.T @ track_y)
check = -OLS[1] / (2 * OLS[2])
opt_t = est_dir.compute_coeffs(track_y, track_t)
assert(np.all(np.round(check, 5) == np.round(opt_t, 5)))
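# Worked aside (a sketch, not one of the original tests): compute_coeffs fits the
# quadratic y = b0 + b1*t + b2*t**2 through the sampled (t, y) pairs by least squares
# and returns the stationary point -b1 / (2 * b2). Three distinct points determine the
# parabola exactly, so for y = (t - 3)**2 + 5 sampled at t = 0, 2, 6 the returned step
# should come out as (approximately) 3:
#
#     example_t = np.array([0.0, 2.0, 6.0])
#     example_y = (example_t - 3) ** 2 + 5
#     assert np.isclose(est_dir.compute_coeffs(example_y, example_t), 3.0)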
def test_14():
"""
Test for combine_tracking - check that correct step size is returned when
forward_tol is met.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 1
forward_tol = 100000
back_tol = 0.0000001
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_15():
"""
Test for combine_tracking - check that correct step size is returned, when
forward_tol is not met.
"""
np.random.seed(3291)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.005
forward_tol = 10000
back_tol = 0.0000001
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_16():
"""
Test for combine_tracking - check that correct step size is returned,
when back_tol is met.
"""
np.random.seed(32964)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 1
back_tol = 1
forward_tol = 100000
beta = np.array([200, 200])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val == f_old)
def test_17():
"""
Test for combine_tracking - check that correct step size is returned,
when back_tol is not met.
"""
np.random.seed(32964)
n = 6
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 10
forward_tol = 1000000
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_18():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 80],
[2, 160],
[4, 40],
[8, 20],
[16, 90]])
track_method = 'Forward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 20, 90])))
assert(np.all(track_t == np.array([0, 8, 16])))
def test_19():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 80],
[2, 70],
[4, 90]])
track_method = 'Forward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 70, 90])))
assert(np.all(track_t == np.array([0, 2, 4])))
def test_20():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 120],
[0.5, 110],
[0.25, 90]])
track_method = 'Backward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 90, 110])))
assert(np.all(track_t == np.array([0, 0.25, 0.5])))
def test_21():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 120],
[0.5, 80]])
track_method = 'Backward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 80, 120])))
assert(np.all(track_t == np.array([0, 0.5, 1])))
def test_22():
"""Test for check_func_val_coeffs when func_val > track_y[1]."""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 60)
step = 1.8251102718712913
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, 100],
[1, 160],
[2, 40],
[4, 90]])
track_method = 'Forward'
upd_point, func_val = (est_dir.check_func_val_coeffs
(track, track_method, centre_point, beta, f,
func_args))
assert(upd_point.shape == (m, ))
assert(func_val == 40)
def test_23():
"""Test for check_func_val_coeffs when func_val <= track_y[1]."""
np.random.seed(91)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.01
forward_tol = 1000000
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(flag == True)
assert(total_func_evals > 0)
track_method = 'Forward'
upd_point, func_val = (est_dir.check_func_val_coeffs
(track, track_method, centre_point, beta, f,
func_args))
assert(upd_point.shape == (m, ))
assert(np.all(func_val <= track[:, 1]))
|
[
"numpy.random.seed",
"est_dir.combine_tracking",
"numpy.ones",
"est_dir.backward_tracking",
"numpy.round",
"est_dir.compute_direction_LS",
"numpy.copy",
"est_dir.compute_coeffs",
"numpy.identity",
"est_dir.forward_tracking",
"est_dir.compute_direction_XY",
"numpy.min",
"numpy.linalg.inv",
"numpy.all",
"numpy.random.uniform",
"est_dir.compute_backward",
"est_dir.compute_forward",
"est_dir.quad_func_params",
"numpy.array",
"est_dir.arrange_track_y_t",
"est_dir.check_func_val_coeffs"
] |
[((134, 152), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (148, 152), True, 'import numpy as np\n'), ((272, 285), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (279, 285), True, 'import numpy as np\n'), ((306, 336), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (323, 336), True, 'import numpy as np\n'), ((352, 386), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (376, 386), False, 'import est_dir\n'), ((547, 623), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (575, 623), False, 'import est_dir\n'), ((887, 924), 'numpy.array', 'np.array', (['[[0, f_old], [step, f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (895, 924), True, 'import numpy as np\n'), ((963, 1065), 'est_dir.compute_forward', 'est_dir.compute_forward', (['step', 'const_forward', 'forward_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_forward, forward_tol, track,\n centre_point, beta, f, func_args)\n', (986, 1065), False, 'import est_dir\n'), ((1693, 1711), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (1707, 1711), True, 'import numpy as np\n'), ((1830, 1843), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (1837, 1843), True, 'import numpy as np\n'), ((1864, 1882), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (1872, 1882), True, 'import numpy as np\n'), ((1897, 1911), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (1908, 1911), True, 'import numpy as np\n'), ((2015, 2041), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (2023, 2041), True, 'import numpy as np\n'), ((2169, 2206), 'numpy.array', 'np.array', (['[[0, f_old], [step, f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (2177, 2206), True, 'import numpy as np\n'), ((2250, 2352), 'est_dir.compute_forward', 'est_dir.compute_forward', (['step', 'const_forward', 'forward_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_forward, forward_tol, track,\n centre_point, beta, f, func_args)\n', (2273, 2352), False, 'import est_dir\n'), ((2908, 2926), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (2922, 2926), True, 'import numpy as np\n'), ((3046, 3059), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (3053, 3059), True, 'import numpy as np\n'), ((3080, 3110), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (3097, 3110), True, 'import numpy as np\n'), ((3126, 3160), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (3150, 3160), False, 'import est_dir\n'), ((3306, 3382), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (3334, 3382), False, 'import est_dir\n'), ((3698, 3808), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 'step', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, step, f_old, f_new, beta,\n const_forward, forward_tol, f, func_args)\n', (3722, 3808), False, 'import est_dir\n'), ((4496, 4514), 'numpy.random.seed', 'np.random.seed', (['(25)'], {}), '(25)\n', (4510, 4514), True, 'import numpy as np\n'), ((4633, 4646), 'numpy.ones', 
'np.ones', (['(m,)'], {}), '((m,))\n', (4640, 4646), True, 'import numpy as np\n'), ((4667, 4685), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (4675, 4685), True, 'import numpy as np\n'), ((4700, 4734), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (4724, 4734), False, 'import est_dir\n'), ((4831, 4847), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (4839, 4847), True, 'import numpy as np\n'), ((5024, 5131), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_forward,\n forward_tol, f, func_args)\n', (5048, 5131), False, 'import est_dir\n'), ((5769, 5787), 'numpy.random.seed', 'np.random.seed', (['(25)'], {}), '(25)\n', (5783, 5787), True, 'import numpy as np\n'), ((5906, 5919), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (5913, 5919), True, 'import numpy as np\n'), ((5940, 5958), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (5948, 5958), True, 'import numpy as np\n'), ((5973, 6007), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (5997, 6007), False, 'import est_dir\n'), ((6101, 6117), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (6109, 6117), True, 'import numpy as np\n'), ((6294, 6401), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_forward,\n forward_tol, f, func_args)\n', (6318, 6401), False, 'import est_dir\n'), ((6862, 6880), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (6876, 6880), True, 'import numpy as np\n'), ((6999, 7012), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (7006, 7012), True, 'import numpy as np\n'), ((7033, 7051), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (7041, 7051), True, 'import numpy as np\n'), ((7066, 7080), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (7077, 7080), True, 'import numpy as np\n'), ((7183, 7209), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (7191, 7209), True, 'import numpy as np\n'), ((7362, 7472), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 'step', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, step, f_old, f_new, beta,\n const_forward, forward_tol, f, func_args)\n', (7386, 7472), False, 'import est_dir\n'), ((7822, 7840), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (7836, 7840), True, 'import numpy as np\n'), ((7923, 7952), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (7940, 7952), True, 'import numpy as np\n'), ((7974, 8003), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (7991, 8003), True, 'import numpy as np\n'), ((8019, 8053), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (8043, 8053), False, 'import est_dir\n'), ((8200, 8276), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (8228, 8276), False, 'import est_dir\n'), ((8569, 8606), 'numpy.array', 'np.array', (['[[0, f_old], [step, 
f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (8577, 8606), True, 'import numpy as np\n'), ((8645, 8742), 'est_dir.compute_backward', 'est_dir.compute_backward', (['step', 'const_back', 'back_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_back, back_tol, track, centre_point,\n beta, f, func_args)\n', (8669, 8742), False, 'import est_dir\n'), ((9312, 9330), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (9326, 9330), True, 'import numpy as np\n'), ((9413, 9442), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (9430, 9442), True, 'import numpy as np\n'), ((9464, 9493), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (9481, 9493), True, 'import numpy as np\n'), ((9509, 9543), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (9533, 9543), False, 'import est_dir\n'), ((9685, 9761), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (9713, 9761), False, 'import est_dir\n'), ((10054, 10091), 'numpy.array', 'np.array', (['[[0, f_old], [step, f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (10062, 10091), True, 'import numpy as np\n'), ((10134, 10231), 'est_dir.compute_backward', 'est_dir.compute_backward', (['step', 'const_back', 'back_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_back, back_tol, track, centre_point,\n beta, f, func_args)\n', (10158, 10231), False, 'import est_dir\n'), ((10329, 10355), 'numpy.all', 'np.all', (['(track == track_new)'], {}), '(track == track_new)\n', (10335, 10355), True, 'import numpy as np\n'), ((10512, 10533), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (10526, 10533), True, 'import numpy as np\n'), ((10614, 10627), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (10621, 10627), True, 'import numpy as np\n'), ((10648, 10666), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (10656, 10666), True, 'import numpy as np\n'), ((10681, 10715), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (10705, 10715), False, 'import est_dir\n'), ((10800, 10820), 'numpy.array', 'np.array', (['[200, 200]'], {}), '([200, 200])\n', (10808, 10820), True, 'import numpy as np\n'), ((10991, 11093), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (11016, 11093), False, 'import est_dir\n'), ((11454, 11475), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (11468, 11475), True, 'import numpy as np\n'), ((11567, 11580), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (11574, 11580), True, 'import numpy as np\n'), ((11601, 11631), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (11618, 11631), True, 'import numpy as np\n'), ((11646, 11680), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (11670, 11680), False, 'import est_dir\n'), ((11834, 11913), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, 
region)\n', (11862, 11913), False, 'import est_dir\n'), ((12219, 12321), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (12244, 12321), False, 'import est_dir\n'), ((12815, 12837), 'numpy.random.seed', 'np.random.seed', (['(329998)'], {}), '(329998)\n', (12829, 12837), True, 'import numpy as np\n'), ((12932, 12962), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (12949, 12962), True, 'import numpy as np\n'), ((12983, 13013), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (13000, 13013), True, 'import numpy as np\n'), ((13028, 13062), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (13052, 13062), False, 'import est_dir\n'), ((13222, 13301), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, region)\n', (13250, 13301), False, 'import est_dir\n'), ((13607, 13709), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (13632, 13709), False, 'import est_dir\n'), ((14135, 14157), 'numpy.random.seed', 'np.random.seed', (['(329998)'], {}), '(329998)\n', (14149, 14157), True, 'import numpy as np\n'), ((14252, 14282), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (14269, 14282), True, 'import numpy as np\n'), ((14303, 14333), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (14320, 14333), True, 'import numpy as np\n'), ((14348, 14382), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (14372, 14382), False, 'import est_dir\n'), ((14532, 14611), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, region)\n', (14560, 14611), False, 'import est_dir\n'), ((14917, 15019), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (14942, 15019), False, 'import est_dir\n'), ((15349, 15373), 'numpy.array', 'np.array', (['[100, 200, 50]'], {}), '([100, 200, 50])\n', (15357, 15373), True, 'import numpy as np\n'), ((15389, 15410), 'numpy.array', 'np.array', (['[0, 1, 0.5]'], {}), '([0, 1, 0.5])\n', (15397, 15410), True, 'import numpy as np\n'), ((15987, 16027), 'est_dir.compute_coeffs', 'est_dir.compute_coeffs', (['track_y', 'track_t'], {}), '(track_y, track_t)\n', (16009, 16027), False, 'import est_dir\n'), ((16237, 16255), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (16251, 16255), True, 'import numpy as np\n'), ((16374, 16387), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (16381, 16387), True, 'import numpy as np\n'), ((16408, 16426), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (16416, 16426), True, 'import numpy as np\n'), ((16441, 16455), 'numpy.identity', 
'np.identity', (['m'], {}), '(m)\n', (16452, 16455), True, 'import numpy as np\n'), ((16585, 16611), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (16593, 16611), True, 'import numpy as np\n'), ((16708, 16833), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (16732, 16833), False, 'import est_dir\n'), ((17329, 17349), 'numpy.random.seed', 'np.random.seed', (['(3291)'], {}), '(3291)\n', (17343, 17349), True, 'import numpy as np\n'), ((17468, 17481), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (17475, 17481), True, 'import numpy as np\n'), ((17502, 17520), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (17510, 17520), True, 'import numpy as np\n'), ((17535, 17569), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (17559, 17569), False, 'import est_dir\n'), ((17694, 17710), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (17702, 17710), True, 'import numpy as np\n'), ((17807, 17932), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (17831, 17932), False, 'import est_dir\n'), ((18421, 18442), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (18435, 18442), True, 'import numpy as np\n'), ((18561, 18574), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (18568, 18574), True, 'import numpy as np\n'), ((18595, 18613), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (18603, 18613), True, 'import numpy as np\n'), ((18628, 18662), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (18652, 18662), False, 'import est_dir\n'), ((18776, 18796), 'numpy.array', 'np.array', (['[200, 200]'], {}), '([200, 200])\n', (18784, 18796), True, 'import numpy as np\n'), ((18893, 19018), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (18917, 19018), False, 'import est_dir\n'), ((19512, 19533), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (19526, 19533), True, 'import numpy as np\n'), ((19663, 19676), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (19670, 19676), True, 'import numpy as np\n'), ((19697, 19727), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (19714, 19727), True, 'import numpy as np\n'), ((19742, 19776), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (19766, 19776), False, 'import est_dir\n'), ((19947, 20026), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, region)\n', (19975, 20026), False, 'import est_dir\n'), ((20258, 20383), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 
'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (20282, 20383), False, 'import est_dir\n'), ((20798, 20865), 'numpy.array', 'np.array', (['[[0, 100], [1, 80], [2, 160], [4, 40], [8, 20], [16, 90]]'], {}), '([[0, 100], [1, 80], [2, 160], [4, 40], [8, 20], [16, 90]])\n', (20806, 20865), True, 'import numpy as np\n'), ((21035, 21081), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (21060, 21081), False, 'import est_dir\n'), ((21262, 21309), 'numpy.array', 'np.array', (['[[0, 100], [1, 80], [2, 70], [4, 90]]'], {}), '([[0, 100], [1, 80], [2, 70], [4, 90]])\n', (21270, 21309), True, 'import numpy as np\n'), ((21433, 21479), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (21458, 21479), False, 'import est_dir\n'), ((21659, 21713), 'numpy.array', 'np.array', (['[[0, 100], [1, 120], [0.5, 110], [0.25, 90]]'], {}), '([[0, 100], [1, 120], [0.5, 110], [0.25, 90]])\n', (21667, 21713), True, 'import numpy as np\n'), ((21838, 21884), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (21863, 21884), False, 'import est_dir\n'), ((22070, 22111), 'numpy.array', 'np.array', (['[[0, 100], [1, 120], [0.5, 80]]'], {}), '([[0, 100], [1, 120], [0.5, 80]])\n', (22078, 22111), True, 'import numpy as np\n'), ((22213, 22259), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (22238, 22259), False, 'import est_dir\n'), ((22466, 22484), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (22480, 22484), True, 'import numpy as np\n'), ((22544, 22557), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (22551, 22557), True, 'import numpy as np\n'), ((22578, 22608), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (22595, 22608), True, 'import numpy as np\n'), ((22624, 22658), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (22648, 22658), False, 'import est_dir\n'), ((22792, 22868), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (22820, 22868), False, 'import est_dir\n'), ((23160, 23208), 'numpy.array', 'np.array', (['[[0, 100], [1, 160], [2, 40], [4, 90]]'], {}), '([[0, 100], [1, 160], [2, 40], [4, 90]])\n', (23168, 23208), True, 'import numpy as np\n'), ((23336, 23424), 'est_dir.check_func_val_coeffs', 'est_dir.check_func_val_coeffs', (['track', 'track_method', 'centre_point', 'beta', 'f', 'func_args'], {}), '(track, track_method, centre_point, beta, f,\n func_args)\n', (23365, 23424), False, 'import est_dir\n'), ((23642, 23660), 'numpy.random.seed', 'np.random.seed', (['(91)'], {}), '(91)\n', (23656, 23660), True, 'import numpy as np\n'), ((23780, 23793), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (23787, 23793), True, 'import numpy as np\n'), ((23814, 23844), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (23831, 23844), True, 'import numpy as np\n'), ((23860, 23894), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (23884, 23894), False, 'import 
est_dir\n'), ((24040, 24116), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (24068, 24116), False, 'import est_dir\n'), ((24432, 24542), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 'step', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, step, f_old, f_new, beta,\n const_forward, forward_tol, f, func_args)\n', (24456, 24542), False, 'import est_dir\n'), ((24775, 24863), 'est_dir.check_func_val_coeffs', 'est_dir.check_func_val_coeffs', (['track', 'track_method', 'centre_point', 'beta', 'f', 'func_args'], {}), '(track, track_method, centre_point, beta, f,\n func_args)\n', (24804, 24863), False, 'import est_dir\n'), ((24969, 25000), 'numpy.all', 'np.all', (['(func_val <= track[:, 1])'], {}), '(func_val <= track[:, 1])\n', (24975, 25000), True, 'import numpy as np\n'), ((775, 796), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (782, 796), True, 'import numpy as np\n'), ((2057, 2078), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (2064, 2078), True, 'import numpy as np\n'), ((3534, 3555), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (3541, 3555), True, 'import numpy as np\n'), ((3983, 4007), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (3991, 4007), True, 'import numpy as np\n'), ((4011, 4025), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (4019, 4025), True, 'import numpy as np\n'), ((4039, 4063), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (4047, 4063), True, 'import numpy as np\n'), ((4067, 4084), 'numpy.round', 'np.round', (['step', '(3)'], {}), '(step, 3)\n', (4075, 4084), True, 'import numpy as np\n'), ((4863, 4884), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (4870, 4884), True, 'import numpy as np\n'), ((5258, 5282), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (5266, 5282), True, 'import numpy as np\n'), ((5286, 5300), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (5294, 5300), True, 'import numpy as np\n'), ((5314, 5338), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (5322, 5338), True, 'import numpy as np\n'), ((5342, 5356), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (5350, 5356), True, 'import numpy as np\n'), ((6133, 6154), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (6140, 6154), True, 'import numpy as np\n'), ((6528, 6552), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (6536, 6552), True, 'import numpy as np\n'), ((6556, 6570), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (6564, 6570), True, 'import numpy as np\n'), ((6584, 6608), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (6592, 6608), True, 'import numpy as np\n'), ((6612, 6626), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (6620, 6626), True, 'import numpy as np\n'), ((7225, 7246), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (7232, 7246), True, 'import numpy as np\n'), ((8428, 8449), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (8435, 8449), True, 'import numpy as np\n'), ((9913, 9934), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (9920, 9934), 
True, 'import numpy as np\n'), ((10836, 10857), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (10843, 10857), True, 'import numpy as np\n'), ((12064, 12085), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (12071, 12085), True, 'import numpy as np\n'), ((12397, 12421), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (12405, 12421), True, 'import numpy as np\n'), ((12425, 12439), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (12433, 12439), True, 'import numpy as np\n'), ((12453, 12477), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (12461, 12477), True, 'import numpy as np\n'), ((12481, 12495), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (12489, 12495), True, 'import numpy as np\n'), ((12658, 12677), 'numpy.min', 'np.min', (['track[:, 1]'], {}), '(track[:, 1])\n', (12664, 12677), True, 'import numpy as np\n'), ((13452, 13473), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (13459, 13473), True, 'import numpy as np\n'), ((13785, 13809), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (13793, 13809), True, 'import numpy as np\n'), ((13813, 13827), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (13821, 13827), True, 'import numpy as np\n'), ((13841, 13865), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (13849, 13865), True, 'import numpy as np\n'), ((13869, 13883), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (13877, 13883), True, 'import numpy as np\n'), ((13931, 13950), 'numpy.min', 'np.min', (['track[:, 1]'], {}), '(track[:, 1])\n', (13937, 13950), True, 'import numpy as np\n'), ((14762, 14783), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (14769, 14783), True, 'import numpy as np\n'), ((15095, 15119), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (15103, 15119), True, 'import numpy as np\n'), ((15123, 15137), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (15131, 15137), True, 'import numpy as np\n'), ((15151, 15175), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (15159, 15175), True, 'import numpy as np\n'), ((15179, 15193), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (15187, 15193), True, 'import numpy as np\n'), ((15241, 15260), 'numpy.min', 'np.min', (['track[:, 1]'], {}), '(track[:, 1])\n', (15247, 15260), True, 'import numpy as np\n'), ((16627, 16648), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (16634, 16648), True, 'import numpy as np\n'), ((17726, 17747), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (17733, 17747), True, 'import numpy as np\n'), ((18812, 18833), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (18819, 18833), True, 'import numpy as np\n'), ((20177, 20198), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (20184, 20198), True, 'import numpy as np\n'), ((23021, 23042), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (23028, 23042), True, 'import numpy as np\n'), ((24268, 24289), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (24275, 24289), True, 'import numpy as np\n'), ((825, 846), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (832, 846), True, 'import numpy as np\n'), ((2107, 2128), 'numpy.copy', 'np.copy', (['centre_point'], {}), 
'(centre_point)\n', (2114, 2128), True, 'import numpy as np\n'), ((3584, 3605), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (3591, 3605), True, 'import numpy as np\n'), ((4189, 4213), 'numpy.round', 'np.round', (['track[j][0]', '(3)'], {}), '(track[j][0], 3)\n', (4197, 4213), True, 'import numpy as np\n'), ((4913, 4934), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (4920, 4934), True, 'import numpy as np\n'), ((6183, 6204), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (6190, 6204), True, 'import numpy as np\n'), ((7275, 7296), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (7282, 7296), True, 'import numpy as np\n'), ((8478, 8499), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (8485, 8499), True, 'import numpy as np\n'), ((9963, 9984), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (9970, 9984), True, 'import numpy as np\n'), ((10886, 10907), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (10893, 10907), True, 'import numpy as np\n'), ((12114, 12135), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (12121, 12135), True, 'import numpy as np\n'), ((12583, 12607), 'numpy.round', 'np.round', (['track[j][0]', '(4)'], {}), '(track[j][0], 4)\n', (12591, 12607), True, 'import numpy as np\n'), ((12611, 12625), 'numpy.round', 'np.round', (['t', '(4)'], {}), '(t, 4)\n', (12619, 12625), True, 'import numpy as np\n'), ((13502, 13523), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (13509, 13523), True, 'import numpy as np\n'), ((14812, 14833), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (14819, 14833), True, 'import numpy as np\n'), ((15652, 15673), 'numpy.array', 'np.array', (['[100, 0, 0]'], {}), '([100, 0, 0])\n', (15660, 15673), True, 'import numpy as np\n'), ((15723, 15744), 'numpy.array', 'np.array', (['[100, 1, 1]'], {}), '([100, 1, 1])\n', (15731, 15744), True, 'import numpy as np\n'), ((15794, 15820), 'numpy.array', 'np.array', (['[100, 0.5, 0.25]'], {}), '([100, 0.5, 0.25])\n', (15802, 15820), True, 'import numpy as np\n'), ((15835, 15891), 'numpy.linalg.inv', 'np.linalg.inv', (['(design_matrix_step.T @ design_matrix_step)'], {}), '(design_matrix_step.T @ design_matrix_step)\n', (15848, 15891), True, 'import numpy as np\n'), ((16047, 16065), 'numpy.round', 'np.round', (['check', '(5)'], {}), '(check, 5)\n', (16055, 16065), True, 'import numpy as np\n'), ((16069, 16087), 'numpy.round', 'np.round', (['opt_t', '(5)'], {}), '(opt_t, 5)\n', (16077, 16087), True, 'import numpy as np\n'), ((21112, 21135), 'numpy.array', 'np.array', (['[100, 20, 90]'], {}), '([100, 20, 90])\n', (21120, 21135), True, 'import numpy as np\n'), ((21168, 21188), 'numpy.array', 'np.array', (['[0, 8, 16]'], {}), '([0, 8, 16])\n', (21176, 21188), True, 'import numpy as np\n'), ((21510, 21533), 'numpy.array', 'np.array', (['[100, 70, 90]'], {}), '([100, 70, 90])\n', (21518, 21533), True, 'import numpy as np\n'), ((21566, 21585), 'numpy.array', 'np.array', (['[0, 2, 4]'], {}), '([0, 2, 4])\n', (21574, 21585), True, 'import numpy as np\n'), ((21915, 21939), 'numpy.array', 'np.array', (['[100, 90, 110]'], {}), '([100, 90, 110])\n', (21923, 21939), True, 'import numpy as np\n'), ((21972, 21996), 'numpy.array', 'np.array', (['[0, 0.25, 0.5]'], {}), '([0, 0.25, 0.5])\n', (21980, 21996), True, 'import numpy as np\n'), ((22290, 22314), 'numpy.array', 'np.array', (['[100, 80, 120]'], {}), '([100, 80, 
120])\n', (22298, 22314), True, 'import numpy as np\n'), ((22347, 22368), 'numpy.array', 'np.array', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (22355, 22368), True, 'import numpy as np\n'), ((23071, 23092), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (23078, 23092), True, 'import numpy as np\n'), ((24318, 24339), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (24325, 24339), True, 'import numpy as np\n'), ((15522, 15539), 'numpy.array', 'np.array', (['track_t'], {}), '(track_t)\n', (15530, 15539), True, 'import numpy as np\n'), ((15578, 15595), 'numpy.array', 'np.array', (['track_t'], {}), '(track_t)\n', (15586, 15595), True, 'import numpy as np\n')]
|
import logging as log
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import Qt
from creator.utils import util
from creator.child_views import shared
from creator.child_views import list_view
GENDERLESS = 0
MALE = 1
FEMALE = 2
class GenderTab(QtWidgets.QWidget, shared.Tab):
def __init__(self, data):
super(GenderTab, self).__init__()
uic.loadUi(util.RESOURCE_UI / 'GenderTab.ui', self)
self.data = data
self.extended = False
self.list_gender.setContextMenuPolicy(Qt.CustomContextMenu)
self.list_gender.customContextMenuRequested.connect(self.context_menu)
self.pkmn_list = util.pokemon_list()
self.speciesDropdown.addItems(self.pkmn_list)
self.speciesDropdown.activated.connect(self.extend_dropdown)
self.add_button.clicked.connect(self.add)
def extend_dropdown(self):
data = self.data.container.data() if self.data and self.data.container else None
if data and not self.extended:
self.pkmn_list.extend(data["pokemon.json"])
self.extended = True
self.speciesDropdown.clear()
self.speciesDropdown.addItems(self.pkmn_list)
def add(self):
species = self.speciesDropdown.currentText()
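        # map the checked radio button onto the corresponding numeric gender constant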
gender = GENDERLESS if self.radioNoGender.isChecked() else None
gender = MALE if self.radioMale.isChecked() else gender
gender = FEMALE if self.radioFemale.isChecked() else gender
self.setattr(self.data.gender, "species", species)
self.setattr(self.data.gender, "gender", gender)
def context_menu(self, pos):
context = QtWidgets.QMenu()
delete_action = context.addAction("delete")
action = context.exec_(self.list_gender.mapToGlobal(pos))
if action == delete_action:
self.delete_gender(self.list_gender.selectedItems()[0])
def delete_gender(self, widget_item):
species_name = widget_item.text()
button_reply = QtWidgets.QMessageBox.question(None, 'Delete',
"Would you like to remove {}".format(species_name),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel,
QtWidgets.QMessageBox.Cancel)
if button_reply == QtWidgets.QMessageBox.Yes:
self.data.container.delete_entry("gender.json", species_name)
self.list_gender.takeItem(self.list_gender.currentRow())
self.data._edited = True
if species_name == self.data.gender.species:
self.data.gender.new()
self.update_list_signal.emit()
log.info("Deleted {}".format(species_name))
def update_custom_list(self):
data = self.data.container.data() if self.data.container else None
if not data or "gender.json" not in data:
return
gender_data = data["gender.json"]
self.list_gender.clear()
for _species, _ in gender_data.items():
self.list_gender.addItem(_species)
|
[
"PyQt5.QtWidgets.QMenu",
"creator.utils.util.pokemon_list",
"PyQt5.uic.loadUi"
] |
[((363, 414), 'PyQt5.uic.loadUi', 'uic.loadUi', (["(util.RESOURCE_UI / 'GenderTab.ui')", 'self'], {}), "(util.RESOURCE_UI / 'GenderTab.ui', self)\n", (373, 414), False, 'from PyQt5 import QtWidgets, uic\n'), ((643, 662), 'creator.utils.util.pokemon_list', 'util.pokemon_list', ([], {}), '()\n', (660, 662), False, 'from creator.utils import util\n'), ((1630, 1647), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', ([], {}), '()\n', (1645, 1647), False, 'from PyQt5 import QtWidgets, uic\n')]
|
"""
Notes
-----
This test and docs/source/usage/iss/iss_cli.sh test the same code paths and should be updated
together
"""
import os
import unittest
import numpy as np
import pandas as pd
import pytest
from starfish.test.full_pipelines.cli._base_cli_test import CLITest
from starfish.types import Features
EXPERIMENT_JSON_URL = "https://d2nhj9g34unfro.cloudfront.net/20181005/ISS-TEST/experiment.json"
@pytest.mark.slow
class TestWithIssData(CLITest, unittest.TestCase):
@property
def spots_file(self):
return "decoded-spots.nc"
@property
def subdirs(self):
return (
"max_projected",
"transforms",
"registered",
"filtered",
"results",
)
@property
def stages(self):
return (
[
"starfish", "validate", "experiment", EXPERIMENT_JSON_URL,
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"MaxProj",
"--dims", "c",
"--dims", "z"
],
[
"starfish", "learn_transform",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Translation",
"--reference-stack",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--upsampling", "1000",
"--axes", "r"
],
[
"starfish", "apply_transform",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--transformation-list", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Warp",
],
[
"starfish", "filter",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][nuclei]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "detect_spots",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--blobs-stack", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"--blobs-axis", "r", "--blobs-axis", "c",
"BlobDetector",
"--min-sigma", "4",
"--max-sigma", "6",
"--num-sigma", "20",
"--threshold", "0.01",
],
[
"starfish", "segment",
"--primary-images", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--nuclei", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"Watershed",
"--nuclei-threshold", ".16",
"--input-threshold", ".22",
"--min-distance", "57",
],
[
"starfish", "target_assignment",
"--label-image",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"--intensities", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"Label",
],
[
"starfish", "decode",
"-i", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"--codebook",
f"@{EXPERIMENT_JSON_URL}",
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc"),
"PerRoundMaxChannelDecoder",
],
# Validate results/{spots,targeted-spots,decoded-spots}.nc
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc")
],
)
def verify_results(self, intensities):
# TODO make this test stronger
genes, counts = np.unique(
intensities.coords[Features.TARGET], return_counts=True)
gene_counts = pd.Series(counts, genes)
# TODO THERE"S NO HUMAN/MOUSE KEYS?
assert gene_counts['ACTB']
|
[
"os.path.join",
"pandas.Series",
"numpy.unique"
] |
[((6573, 6639), 'numpy.unique', 'np.unique', (['intensities.coords[Features.TARGET]'], {'return_counts': '(True)'}), '(intensities.coords[Features.TARGET], return_counts=True)\n', (6582, 6639), True, 'import numpy as np\n'), ((6675, 6699), 'pandas.Series', 'pd.Series', (['counts', 'genes'], {}), '(counts, genes)\n', (6684, 6699), True, 'import pandas as pd\n'), ((1104, 1165), 'os.path.join', 'os.path.join', (['tempdir', '"""max_projected"""', '"""primary_images.json"""'], {}), "(tempdir, 'max_projected', 'primary_images.json')\n", (1116, 1165), False, 'import os\n'), ((1413, 1474), 'os.path.join', 'os.path.join', (['tempdir', '"""max_projected"""', '"""primary_images.json"""'], {}), "(tempdir, 'max_projected', 'primary_images.json')\n", (1425, 1474), False, 'import os\n'), ((1558, 1612), 'os.path.join', 'os.path.join', (['tempdir', '"""transforms"""', '"""transforms.json"""'], {}), "(tempdir, 'transforms', 'transforms.json')\n", (1570, 1612), False, 'import os\n'), ((2056, 2114), 'os.path.join', 'os.path.join', (['tempdir', '"""registered"""', '"""primary_images.json"""'], {}), "(tempdir, 'registered', 'primary_images.json')\n", (2068, 2114), False, 'import os\n'), ((2211, 2265), 'os.path.join', 'os.path.join', (['tempdir', '"""transforms"""', '"""transforms.json"""'], {}), "(tempdir, 'transforms', 'transforms.json')\n", (2223, 2265), False, 'import os\n'), ((2439, 2497), 'os.path.join', 'os.path.join', (['tempdir', '"""registered"""', '"""primary_images.json"""'], {}), "(tempdir, 'registered', 'primary_images.json')\n", (2451, 2497), False, 'import os\n'), ((2581, 2637), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""primary_images.json"""'], {}), "(tempdir, 'filtered', 'primary_images.json')\n", (2593, 2637), False, 'import os\n'), ((2948, 2996), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""nuclei.json"""'], {}), "(tempdir, 'filtered', 'nuclei.json')\n", (2960, 2996), False, 'import os\n'), ((3305, 3351), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""dots.json"""'], {}), "(tempdir, 'filtered', 'dots.json')\n", (3317, 3351), False, 'import os\n'), ((3580, 3636), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""primary_images.json"""'], {}), "(tempdir, 'filtered', 'primary_images.json')\n", (3592, 3636), False, 'import os\n'), ((3720, 3764), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""spots.nc"""'], {}), "(tempdir, 'results', 'spots.nc')\n", (3732, 3764), False, 'import os\n'), ((3853, 3899), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""dots.json"""'], {}), "(tempdir, 'filtered', 'dots.json')\n", (3865, 3899), False, 'import os\n'), ((4297, 4353), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""primary_images.json"""'], {}), "(tempdir, 'filtered', 'primary_images.json')\n", (4309, 4353), False, 'import os\n'), ((4437, 4485), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""nuclei.json"""'], {}), "(tempdir, 'filtered', 'nuclei.json')\n", (4449, 4485), False, 'import os\n'), ((4563, 4614), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""label_image.png"""'], {}), "(tempdir, 'results', 'label_image.png')\n", (4575, 4614), False, 'import os\n'), ((4955, 5006), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""label_image.png"""'], {}), "(tempdir, 'results', 'label_image.png')\n", (4967, 5006), False, 'import os\n'), ((5095, 5139), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', 
'"""spots.nc"""'], {}), "(tempdir, 'results', 'spots.nc')\n", (5107, 5139), False, 'import os\n'), ((5223, 5276), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""targeted-spots.nc"""'], {}), "(tempdir, 'results', 'targeted-spots.nc')\n", (5235, 5276), False, 'import os\n'), ((5446, 5499), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""targeted-spots.nc"""'], {}), "(tempdir, 'results', 'targeted-spots.nc')\n", (5458, 5499), False, 'import os\n'), ((5650, 5702), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""decoded-spots.nc"""'], {}), "(tempdir, 'results', 'decoded-spots.nc')\n", (5662, 5702), False, 'import os\n'), ((5970, 6014), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""spots.nc"""'], {}), "(tempdir, 'results', 'spots.nc')\n", (5982, 6014), False, 'import os\n'), ((6164, 6217), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""targeted-spots.nc"""'], {}), "(tempdir, 'results', 'targeted-spots.nc')\n", (6176, 6217), False, 'import os\n'), ((6367, 6419), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""decoded-spots.nc"""'], {}), "(tempdir, 'results', 'decoded-spots.nc')\n", (6379, 6419), False, 'import os\n')]
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
import SillyServer
setup(
name="SillyServer",
version=SillyServer.__VERSION__,
author=SillyServer.__AUTHOR__,
url=SillyServer.__URL__,
license=SillyServer.__LICENSE__,
packages=find_packages(),
description="A web framework that is silly",
keywords="silly server",
test_suite="nose.collector"
)
|
[
"setuptools.find_packages"
] |
[((285, 300), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (298, 300), False, 'from setuptools import setup, find_packages\n')]
|
from django_filters import rest_framework as filters
from hive_sbi_api.core.models import Transaction
class TransactionFilter(filters.FilterSet):
account = filters.CharFilter(
field_name='account__account',
label='account',
)
sponsor = filters.CharFilter(
field_name='sponsor__account',
label='sponsor',
)
sponsee = filters.CharFilter(
field_name='sponsees__account__account',
label='sponsee',
)
class Meta:
model = Transaction
fields = (
'source',
'account',
'sponsor',
'status',
'share_type',
'sponsee',
)
|
[
"django_filters.rest_framework.CharFilter"
] |
[((163, 229), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', ([], {'field_name': '"""account__account"""', 'label': '"""account"""'}), "(field_name='account__account', label='account')\n", (181, 229), True, 'from django_filters import rest_framework as filters\n'), ((268, 334), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', ([], {'field_name': '"""sponsor__account"""', 'label': '"""sponsor"""'}), "(field_name='sponsor__account', label='sponsor')\n", (286, 334), True, 'from django_filters import rest_framework as filters\n'), ((373, 449), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', ([], {'field_name': '"""sponsees__account__account"""', 'label': '"""sponsee"""'}), "(field_name='sponsees__account__account', label='sponsee')\n", (391, 449), True, 'from django_filters import rest_framework as filters\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# DATE: 2021/7/24
# Author: <EMAIL>
from collections import OrderedDict
from threading import Lock
from time import time as current
from typing import Dict, Any, Type, Union, Optional, NoReturn, Tuple, List, Callable
from cache3 import AbstractCache
from cache3.setting import DEFAULT_TIMEOUT, DEFAULT_TAG
from cache3.utils import NullContext
LK: Type = Union[NullContext, Lock]
Number: Type = Union[int, float]
TG: Type = Optional[str]
SK: Type = Tuple[Any, TG]
Time: Type = float
VT: Type = int
VH = Callable[[Any, VT], NoReturn]
VT_SET = 0
VT_GET = 1
VT_INCR = 2
_caches: Dict[Any, Any] = {}
_expire_info: Dict[Any, Any] = {}
_locks: Dict[Any, Any] = {}
# Thread unsafe cache in memory
class SimpleCache(AbstractCache):
"""
    Simple encapsulation of ``OrderedDict``, so it has performance similar
    to that of a ``dict``; at the same time, its requirements for keys and
    values are relatively loose.
    It is implemented entirely in memory, so set a suitable capacity and
    expiration time to avoid wasting memory.
>>> cache = SimpleCache('test_cache', 60)
>>> cache.set('name', 'venus')
True
>>> cache.get('name')
'venus'
>>> cache.delete('name')
True
>>> cache.get('name')
>>> cache.set('gender', 'male', 0)
True
>>> cache.get('gender')
"""
LOCK: LK = NullContext
def __init__(self, *args, **kwargs) -> None:
super(SimpleCache, self).__init__(*args, **kwargs)
self.visit_hook: VH = getattr(self, f'{self.evict_type}_hook_visit')
# Attributes _name, _timeout from validate.
self._cache: OrderedDict[SK, Any] = _caches.setdefault(
self.name, OrderedDict()
)
self._expire_info: Dict[SK, Any] = _expire_info.setdefault(self.name, {})
self._lock: LK = _locks.setdefault(self.name, self.LOCK())
def set(
self, key: Any, value: Any, timeout: Number = DEFAULT_TIMEOUT,
tag: TG = DEFAULT_TAG
) -> bool:
store_key: SK = self.store_key(key, tag=tag)
serial_value: Any = self.serialize(value)
with self._lock:
return self._set(store_key, serial_value, timeout)
def get(self, key: str, default: Any = None, tag: TG = DEFAULT_TAG) -> Any:
store_key: SK = self.store_key(key, tag=tag)
with self._lock:
if self._has_expired(store_key):
self._delete(store_key)
return default
value: Any = self.deserialize(self._cache[store_key])
self.visit_hook(store_key, VT_GET)
return value
def ex_set(
self, key: str, value: Any, timeout: float = DEFAULT_TIMEOUT,
tag: Optional[str] = DEFAULT_TAG
) -> bool:
""" Realize the mutually exclusive operation of data through thread lock.
but whether the mutex takes effect depends on the lock type.
"""
store_key: SK = self.store_key(key, tag=tag)
serial_value: Any = self.serialize(value)
with self._lock:
if self._has_expired(store_key):
self._set(store_key, serial_value, timeout)
return True
return False
def touch(self, key: str, timeout: Number, tag: TG = DEFAULT_TAG) -> bool:
""" Renew the key. When the key does not exist, false will be returned """
store_key: SK = self.store_key(key, tag=tag)
with self._lock:
if self._has_expired(store_key):
return False
self._expire_info[store_key] = self.get_backend_timeout(timeout)
return True
def delete(self, key: str, tag: TG = DEFAULT_TAG) -> bool:
store_key: SK = self.store_key(key, tag=tag)
with self._lock:
return self._delete(store_key)
def inspect(self, key: str, tag: TG = DEFAULT_TAG) -> Optional[Dict[str, Any]]:
""" Get the details of the key value include stored key and
serialized value.
"""
store_key: SK = self.store_key(key, tag)
if not self._has_expired(store_key):
return {
'key': key,
'store_key': store_key,
'store_value': self._cache[store_key],
'value': self.deserialize(self._cache[store_key]),
'expire': self._expire_info[store_key]
}
def incr(self, key: str, delta: int = 1, tag: TG = DEFAULT_TAG) -> Number:
""" Will throed ValueError when the key is not existed. """
store_key: SK = self.store_key(key, tag=tag)
with self._lock:
if self._has_expired(store_key):
self._delete(store_key)
raise ValueError("Key '%s' not found" % key)
value: Any = self.deserialize(self._cache[store_key])
serial_value: int = self.serialize(value + delta)
self._cache[store_key] = serial_value
self.visit_hook(store_key, VT_INCR)
return serial_value
def has_key(self, key: str, tag: TG = DEFAULT_TAG) -> bool:
store_key: SK = self.store_key(key, tag=tag)
with self._lock:
if self._has_expired(store_key):
self._delete(store_key)
return False
return True
def ttl(self, key: Any, tag: TG) -> Time:
store_key: Any = self.store_key(key, tag)
if self._has_expired(store_key):
return -1
return self._expire_info[store_key] - current()
def clear(self) -> bool:
with self._lock:
self._cache.clear()
self._expire_info.clear()
return True
def evict(self) -> NoReturn:
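        # cull_size == 0 empties the cache entirely; otherwise roughly
        # 1/cull_size of the entries are popped from the tail of the OrderedDict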
if self.cull_size == 0:
self._cache.clear()
self._expire_info.clear()
else:
count = len(self._cache) // self.cull_size
for i in range(count):
store_key, _ = self._cache.popitem()
del self._expire_info[store_key]
def store_key(self, key: Any, tag: TG) -> SK:
return key, tag
def restore_key(self, store_key: SK) -> SK:
return store_key
def _has_expired(self, store_key: SK) -> bool:
exp: float = self._expire_info.get(store_key, -1.)
return exp is not None and exp <= current()
def _delete(self, store_key: SK) -> bool:
try:
del self._cache[store_key]
del self._expire_info[store_key]
except KeyError:
return False
return True
def _set(self, store_key: SK, value: Any, timeout=DEFAULT_TIMEOUT) -> bool:
if self.timeout and len(self) >= self.max_size:
self.evict()
self._cache[store_key] = value
self.visit_hook(store_key, VT_SET)
self._expire_info[store_key] = self.get_backend_timeout(timeout)
return True
def __iter__(self) -> Tuple[Any, ...]:
for store_key in reversed(self._cache.keys()):
if not self._has_expired(store_key):
key, tag = self.restore_key(store_key)
yield key, self.deserialize(self._cache[store_key]), tag
def __len__(self) -> int:
return len(self._cache)
def lru_hook_visit(self, store_key: Any, vt: VT) -> NoReturn:
self._cache.move_to_end(store_key, last=False)
def lfu_hook_visit(self, store_key: Any, vt: VT) -> NoReturn:
""""""
def fifo_hook_visit(self, store_key: Any, vt: VT) -> NoReturn:
if vt == VT_SET:
self._cache.move_to_end(store_key, last=False)
__delitem__ = delete
__getitem__ = get
__setitem__ = set
# Thread safe cache in memory
class SafeCache(SimpleCache):
LOCK: LK = Lock
|
[
"collections.OrderedDict",
"time.time"
] |
[((1726, 1739), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1737, 1739), False, 'from collections import OrderedDict\n'), ((5522, 5531), 'time.time', 'current', ([], {}), '()\n', (5529, 5531), True, 'from time import time as current\n'), ((6321, 6330), 'time.time', 'current', ([], {}), '()\n', (6328, 6330), True, 'from time import time as current\n')]
|
from django.urls import re_path
import mainapp.views as mainapp
from .apps import MainappConfig
app_name = MainappConfig.name
urlpatterns = [
re_path(r"^$", mainapp.products, name="index"),
re_path(r"^category/(?P<pk>\d+)/$", mainapp.products, name="category"),
re_path(r"^category/(?P<pk>\d+)/page/(?P<page>\d+)/$", mainapp.products, name="page"),
re_path(r"^product/(?P<pk>\d+)/$", mainapp.product, name="product"),
]
|
[
"django.urls.re_path"
] |
[((150, 195), 'django.urls.re_path', 're_path', (['"""^$"""', 'mainapp.products'], {'name': '"""index"""'}), "('^$', mainapp.products, name='index')\n", (157, 195), False, 'from django.urls import re_path\n'), ((202, 272), 'django.urls.re_path', 're_path', (['"""^category/(?P<pk>\\\\d+)/$"""', 'mainapp.products'], {'name': '"""category"""'}), "('^category/(?P<pk>\\\\d+)/$', mainapp.products, name='category')\n", (209, 272), False, 'from django.urls import re_path\n'), ((278, 368), 'django.urls.re_path', 're_path', (['"""^category/(?P<pk>\\\\d+)/page/(?P<page>\\\\d+)/$"""', 'mainapp.products'], {'name': '"""page"""'}), "('^category/(?P<pk>\\\\d+)/page/(?P<page>\\\\d+)/$', mainapp.products,\n name='page')\n", (285, 368), False, 'from django.urls import re_path\n'), ((369, 436), 'django.urls.re_path', 're_path', (['"""^product/(?P<pk>\\\\d+)/$"""', 'mainapp.product'], {'name': '"""product"""'}), "('^product/(?P<pk>\\\\d+)/$', mainapp.product, name='product')\n", (376, 436), False, 'from django.urls import re_path\n')]
|
from math import pi
import os
from MultiHex2.tools import Basic_Tool
from MultiHex2.core.coordinates import screen_to_hex, hex_to_screen
from MultiHex2.actions import NullAction
from tools.basic_tool import ToolLayer
from PyQt5 import QtGui
art_dir = os.path.join( os.path.dirname(__file__),'..','assets','buttons')
class MapUse(Basic_Tool):
"""
Define the tool that can be used to move mobiles around, look at them, edit them, etc...
"""
def __init__(self, parent=None):
super().__init__(parent)
self.dimensions = self.parent.dimensions
@classmethod
def buttonIcon(cls):
assert(os.path.exists(os.path.join(art_dir, "temp.svg")))
return QtGui.QPixmap(os.path.join(art_dir, "temp.svg")).scaled(48,48)
@classmethod
def tool_layer(cls):
return ToolLayer.mapuse
def primary_mouse_released(self, event):
locid = screen_to_hex( event.scenePos() )
pos = hex_to_screen(locid)
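        # convert the clicked hex centre to longitude/latitude
        # (x maps onto [0, 2*pi], y maps onto [pi/2, -pi/2])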
longitude = 2*pi*pos.x()/self.dimensions[0]
latitude = -(pi*pos.y()/self.dimensions[1]) + 0.5*pi
self.parent.config_with(latitude,longitude)
self.parent.update_times()
# check for mobile here,
return NullAction()
|
[
"MultiHex2.core.coordinates.hex_to_screen",
"os.path.dirname",
"os.path.join",
"MultiHex2.actions.NullAction"
] |
[((269, 294), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (284, 294), False, 'import os\n'), ((954, 974), 'MultiHex2.core.coordinates.hex_to_screen', 'hex_to_screen', (['locid'], {}), '(locid)\n', (967, 974), False, 'from MultiHex2.core.coordinates import screen_to_hex, hex_to_screen\n'), ((1247, 1259), 'MultiHex2.actions.NullAction', 'NullAction', ([], {}), '()\n', (1257, 1259), False, 'from MultiHex2.actions import NullAction\n'), ((654, 687), 'os.path.join', 'os.path.join', (['art_dir', '"""temp.svg"""'], {}), "(art_dir, 'temp.svg')\n", (666, 687), False, 'import os\n'), ((719, 752), 'os.path.join', 'os.path.join', (['art_dir', '"""temp.svg"""'], {}), "(art_dir, 'temp.svg')\n", (731, 752), False, 'import os\n')]
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="sciPENN",
version="0.9.6",
author="<NAME>",
author_email="<EMAIL>",
description="A package for integrative and predictive analysis of CITE-seq data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jlakkis/sciPENN",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
install_requires=['torch>=1.6.1', 'numba<=0.50.0', 'scanpy>=1.7.1', 'pandas>=1.1.5', 'numpy>=1.20.1', 'scipy>=1.6.1', 'tqdm>=4.59.0', 'anndata>=0.7.5'],
python_requires=">=3.7",
)
|
[
"setuptools.find_packages"
] |
[((646, 683), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (670, 683), False, 'import setuptools\n')]
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2020 parasim inc
# (c) 2010-2020 california institute of technology
# all rights reserved
#
# the package
import altar
# and the protocols
from .Controller import Controller as controller
from .Sampler import Sampler as sampler
from .Scheduler import Scheduler as scheduler
from .Solver import Solver as solver
# implementations
@altar.foundry(
implements=controller,
tip="a Bayesian controller that implements simulated annealing")
def annealer():
# grab the factory
from .Annealer import Annealer
# attach its docstring
__doc__ = Annealer.__doc__
# and return it
return Annealer
@altar.foundry(
implements=scheduler,
tip="a Bayesian scheduler based on the COV algorithm")
def cov():
# grab the factory
from .COV import COV
# attach its docstring
__doc__ = COV.__doc__
# and return it
return COV
@altar.foundry(
implements=solver,
tip="a solver for δβ based on a Brent minimizer from gsl")
def brent():
# grab the factory
from .Brent import Brent
# attach its docstring
__doc__ = Brent.__doc__
# and return it
return Brent
@altar.foundry(
implements=solver,
tip="a solver for δβ based on a naive grid search")
def grid():
# grab the factory
from .Grid import Grid
# attach its docstring
__doc__ = Grid.__doc__
# and return it
return Grid
@altar.foundry(
implements=sampler,
tip="a Bayesian sampler based on the Metropolis algorithm")
def metropolis():
# grab the factory
from .Metropolis import Metropolis
# attach its docstring
__doc__ = Metropolis.__doc__
# and return it
return Metropolis
@altar.foundry(
implements=altar.simulations.monitor,
tip="a monitor that times the various simulation phases")
def profiler():
# grab the factory
from .Profiler import Profiler
# attach its docstring
__doc__ = Profiler.__doc__
# and return it
return Profiler
# end of file
|
[
"altar.foundry"
] |
[((409, 515), 'altar.foundry', 'altar.foundry', ([], {'implements': 'controller', 'tip': '"""a Bayesian controller that implements simulated annealing"""'}), "(implements=controller, tip=\n 'a Bayesian controller that implements simulated annealing')\n", (422, 515), False, 'import altar\n'), ((695, 790), 'altar.foundry', 'altar.foundry', ([], {'implements': 'scheduler', 'tip': '"""a Bayesian scheduler based on the COV algorithm"""'}), "(implements=scheduler, tip=\n 'a Bayesian scheduler based on the COV algorithm')\n", (708, 790), False, 'import altar\n'), ((945, 1041), 'altar.foundry', 'altar.foundry', ([], {'implements': 'solver', 'tip': '"""a solver for δβ based on a Brent minimizer from gsl"""'}), "(implements=solver, tip=\n 'a solver for δβ based on a Brent minimizer from gsl')\n", (958, 1041), False, 'import altar\n'), ((1206, 1295), 'altar.foundry', 'altar.foundry', ([], {'implements': 'solver', 'tip': '"""a solver for δβ based on a naive grid search"""'}), "(implements=solver, tip=\n 'a solver for δβ based on a naive grid search')\n", (1219, 1295), False, 'import altar\n'), ((1455, 1553), 'altar.foundry', 'altar.foundry', ([], {'implements': 'sampler', 'tip': '"""a Bayesian sampler based on the Metropolis algorithm"""'}), "(implements=sampler, tip=\n 'a Bayesian sampler based on the Metropolis algorithm')\n", (1468, 1553), False, 'import altar\n'), ((1743, 1857), 'altar.foundry', 'altar.foundry', ([], {'implements': 'altar.simulations.monitor', 'tip': '"""a monitor that times the various simulation phases"""'}), "(implements=altar.simulations.monitor, tip=\n 'a monitor that times the various simulation phases')\n", (1756, 1857), False, 'import altar\n')]
|
"""
Trading-Technical-Indicators (tti) python library
File name: _volume_oscillator.py
Implements the Volume Oscillator technical indicator.
"""
import pandas as pd
from ._technical_indicator import TechnicalIndicator
from ..utils.constants import TRADE_SIGNALS
from ..utils.exceptions import NotEnoughInputData, WrongTypeForInputParameter,\
WrongValueForInputParameter
class VolumeOscillator(TechnicalIndicator):
"""
Volume Oscillator Technical Indicator class implementation.
Args:
input_data (pandas.DataFrame): The input data. Required input column
is ``volume``. The index is of type ``pandas.DatetimeIndex``.
long_period (int, default=5): The past periods to be used for the
calculation of the long moving average.
short_period (int, default=2): The past periods to be used for the
calculation of the short moving average.
fill_missing_values (bool, default=True): If set to True, missing
values in the input data are being filled.
Attributes:
_input_data (pandas.DataFrame): The ``input_data`` after preprocessing.
_ti_data (pandas.DataFrame): The calculated indicator. Index is of type
``pandas.DatetimeIndex``. It contains one column, the ``vosc``.
_properties (dict): Indicator properties.
_calling_instance (str): The name of the class.
Raises:
WrongTypeForInputParameter: Input argument has wrong type.
WrongValueForInputParameter: Unsupported value for input argument.
NotEnoughInputData: Not enough data for calculating the indicator.
TypeError: Type error occurred when validating the ``input_data``.
ValueError: Value error occurred when validating the ``input_data``.
"""
def __init__(self, input_data, long_period=5, short_period=2,
fill_missing_values=True):
# Validate and store if needed, the input parameters
if isinstance(long_period, int):
if long_period > 0:
self._long_period = long_period
else:
raise WrongValueForInputParameter(
long_period, 'long_period', '>0')
else:
raise WrongTypeForInputParameter(
type(long_period), 'long_period', 'int')
if isinstance(short_period, int):
if short_period > 0:
self._short_period = short_period
else:
raise WrongValueForInputParameter(
short_period, 'short_period', '>0')
else:
raise WrongTypeForInputParameter(
type(short_period), 'short_period', 'int')
if self._long_period <= self._short_period:
raise WrongValueForInputParameter(
long_period, 'long_period ',
'> short_period [' + str(self._short_period) + ']')
# Control is passing to the parent class
super().__init__(calling_instance=self.__class__.__name__,
input_data=input_data,
fill_missing_values=fill_missing_values)
def _calculateTi(self):
"""
Calculates the technical indicator for the given input data. The input
data are taken from an attribute of the parent class.
Returns:
pandas.DataFrame: The calculated indicator. Index is of type
``pandas.DatetimeIndex``. It contains one column, the ``vosc``.
Raises:
NotEnoughInputData: Not enough data for calculating the indicator.
"""
# Not enough data for the requested period
if len(self._input_data.index) < self._long_period:
raise NotEnoughInputData('Volume Oscillator', self._long_period,
len(self._input_data.index))
vosc = pd.DataFrame(index=self._input_data.index, columns=['vosc'],
data=None, dtype='float64')
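        # oscillator = short-period rolling mean of volume minus long-period rolling mean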
vosc['vosc'] = self._input_data['volume'].rolling(
window=self._short_period, min_periods=self._short_period,
center=False, win_type=None, on=None, axis=0, closed=None
).mean() - self._input_data['volume'].rolling(
window=self._long_period, min_periods=self._long_period,
center=False, win_type=None, on=None, axis=0, closed=None).mean()
return vosc.round(4)
def getTiSignal(self):
"""
Calculates and returns the trading signal for the calculated technical
indicator.
Returns:
{('hold', 0), ('buy', -1), ('sell', 1)}: The calculated trading
signal.
"""
# Not enough data for calculating trading signal
if len(self._ti_data.index) < 3:
return TRADE_SIGNALS['hold']
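        # buy when the oscillator has risen for three consecutive periods above zero;
        # sell when it has fallen for three consecutive periods while still above zero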
if (0 < self._ti_data['vosc'].iat[-3] < self._ti_data['vosc'].iat[-2] <
self._ti_data['vosc'].iat[-1]):
return TRADE_SIGNALS['buy']
if (self._ti_data['vosc'].iat[-3] > self._ti_data['vosc'].iat[-2] >
self._ti_data['vosc'].iat[-1] > 0):
return TRADE_SIGNALS['sell']
return TRADE_SIGNALS['hold']
|
[
"pandas.DataFrame"
] |
[((3853, 3945), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self._input_data.index', 'columns': "['vosc']", 'data': 'None', 'dtype': '"""float64"""'}), "(index=self._input_data.index, columns=['vosc'], data=None,\n dtype='float64')\n", (3865, 3945), True, 'import pandas as pd\n')]
|
# NOTE: Derived from https://github.com/biocore/qurro/blob/master/setup.py
from setuptools import find_packages, setup
classes = """
Development Status :: 3 - Alpha
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
"""
classifiers = [s.strip() for s in classes.split("\n") if s]
description = "Minimal Python library for parsing SPAdes FASTG files"
with open("README.md") as f:
long_description = f.read()
version = "0.0.0"
setup(
name="pyfastg",
version=version,
license="MIT",
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author="<NAME>, <NAME>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com/fedarko/pyfastg",
classifiers=classifiers,
packages=find_packages(),
install_requires=["networkx", "scikit-bio"],
extras_require={"dev": ["pytest", "pytest-cov", "flake8", "black"]},
)
|
[
"setuptools.find_packages"
] |
[((974, 989), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (987, 989), False, 'from setuptools import find_packages, setup\n')]
|
from mayavi import mlab as mayalab
import numpy as np
import os
def plot_pc(pcs,color=None,scale_factor=.05,mode='point'):
if color == 'red':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(1,0,0))
print("color",color)
elif color == 'blue':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,0,1))
elif color == 'green':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,1,0))
elif color == 'ycan':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,1,1))
else:
print("unkown color")
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=color)
def plot_pc_with_normal(pcs,pcs_n,scale_factor=1.0,color='red'):
if color == 'red':
mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], color=(1,0,0), mode='arrow',scale_factor=1.0)
elif color == 'blue':
mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], color=(0,0,1), mode='arrow',scale_factor=1.0)
elif color == 'green':
mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], color=(0,1,0), mode='arrow',scale_factor=1.0)
def plot_origin():
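  # mark the origin with a small cyan sphere and draw short arrows along the x/y/z axes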
origin_pc = np.array([0.0,0.0,0.0]).reshape((-1,3))
plot_pc(origin_pc,color='ycan',mode='sphere',scale_factor=.01)
origin_pcs = np.tile(origin_pc,(3,1))
origin_pcns = np.eye(3) * 0.01
plot_pc_with_normal(origin_pcs,origin_pcns)
if __name__ == '__main__':
#save_dir = '/home/lins/MetaGrasp/Data/BlensorResult/2056'
#gripper_name = '056_rho0.384015_azi1.000000_ele89.505854_theta0.092894_xcam0.000000_ycam0.000000_zcam0.384015_scale0.146439_xdim0.084960_ydim0.084567_zdim0.08411000000_pcn_new.npz.npy'
#gripper_name ='339_rho0.308024_azi6.000000_ele89.850030_theta-0.013403_xcam0.000000_ycam0.000000_zcam0.308024_scale0.061975_xdim0.048725_ydim0.036192_zdim0.01252500000_pcn.npz'
gripper = np.load(os.path.join("robotiq2f_open.npy"))
#plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.002)
plot_pc(gripper,color=(209/255.0,64/255.0,109/255.0),mode='sphere',scale_factor=0.002)
plot_origin()
mayalab.show()
#sle = np.array([1494,1806])
#plot_pc(gripper[sle],color='red',mode='sphere',scale_factor=0.002)
#mayalab.show()
#save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/interp'
#save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data3'
# #save_dir_gt = '/home/lins/MetaGrasp/Data/Gripper/Data'
save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data_DB/G5/f2_5_close.npy'
a = np.load(save_dir)
plot_pc(a)
save_dirb = '/home/lins/MetaGrasp/Data/Gripper/Data_DB/G3/f2_3_close.npy'
b = np.load(save_dirb)
plot_pc(b,color='red')
mayalab.show()
#for i in range(10001,10300):
# gripper_name = 'f2_'+str(i)+'_middel.npy'
#print(gripper_name)
# gripper = np.load(os.path.join(save_dir,gripper_name))
# plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.002)
# plot_origin()
# mayalab.show()
#save_dir_gt = '/home/lins/MetaGrasp/Data/Gripper/Data'
#gripper_gt = np.load(os.path.join(save_dir_gt,gripper_name))
#plot_pc(gripper_gt,color='red',mode='sphere',scale_factor=0.002)
if 0:
for i in range(0,199):
save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data_noR'
#save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/recon_old'
gripper_name = 'robotiq_3f_'+str(i)+'.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
if 0:
save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/interp'
gripper_name = 'kinova_kg3_0.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
gripper_name = 'robotiq_3f_1.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/interp'
gripper_name = 'middle0.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
gripper_name = 'middle1.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data_noR'
gripper_name1 = 'kinova_kg3_0.npy'
print(gripper_name)
gripper1 = np.load(os.path.join(save_dir,gripper_name1))
plot_pc(gripper1,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
gripper_name2 = 'robotiq_3f_1.npy'
print(gripper_name)
gripper2 = np.load(os.path.join(save_dir,gripper_name2))
plot_pc(gripper2,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
|
[
"numpy.load",
"mayavi.mlab.quiver3d",
"mayavi.mlab.show",
"mayavi.mlab.points3d",
"numpy.array",
"numpy.tile",
"numpy.eye",
"os.path.join"
] |
[((1488, 1514), 'numpy.tile', 'np.tile', (['origin_pc', '(3, 1)'], {}), '(origin_pc, (3, 1))\n', (1495, 1514), True, 'import numpy as np\n'), ((2297, 2311), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (2309, 2311), True, 'from mayavi import mlab as mayalab\n'), ((2695, 2712), 'numpy.load', 'np.load', (['save_dir'], {}), '(save_dir)\n', (2702, 2712), True, 'import numpy as np\n'), ((2805, 2823), 'numpy.load', 'np.load', (['save_dirb'], {}), '(save_dirb)\n', (2812, 2823), True, 'import numpy as np\n'), ((2849, 2863), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (2861, 2863), True, 'from mayavi import mlab as mayalab\n'), ((150, 259), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(1, 0, 0)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(1, 0, 0))\n', (166, 259), True, 'from mayavi import mlab as mayalab\n'), ((867, 1008), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'color': '(1, 0, 0)', 'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], color=(1, 0, 0), mode='arrow', scale_factor=1.0)\n", (883, 1008), True, 'from mayavi import mlab as mayalab\n'), ((1529, 1538), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1535, 1538), True, 'import numpy as np\n'), ((2067, 2101), 'os.path.join', 'os.path.join', (['"""robotiq2f_open.npy"""'], {}), "('robotiq2f_open.npy')\n", (2079, 2101), False, 'import os\n'), ((4062, 4076), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4074, 4076), True, 'from mayavi import mlab as mayalab\n'), ((4302, 4316), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4314, 4316), True, 'from mayavi import mlab as mayalab\n'), ((4607, 4621), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4619, 4621), True, 'from mayavi import mlab as mayalab\n'), ((4842, 4856), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4854, 4856), True, 'from mayavi import mlab as mayalab\n'), ((5143, 5157), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (5155, 5157), True, 'from mayavi import mlab as mayalab\n'), ((5387, 5401), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (5399, 5401), True, 'from mayavi import mlab as mayalab\n'), ((298, 407), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 0, 1)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 0, 1))\n', (314, 407), True, 'from mayavi import mlab as mayalab\n'), ((1032, 1173), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'color': '(0, 0, 1)', 'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], color=(0, 0, 1), mode='arrow', scale_factor=1.0)\n", (1048, 1173), True, 'from mayavi import mlab as mayalab\n'), ((1368, 1393), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1376, 1393), True, 'import numpy as np\n'), ((3742, 3756), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (3754, 3756), True, 'from mayavi import mlab as mayalab\n'), ((3917, 3953), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (3929, 3953), False, 
'import os\n'), ((4158, 4194), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (4170, 4194), False, 'import os\n'), ((4462, 4498), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (4474, 4498), False, 'import os\n'), ((4698, 4734), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (4710, 4734), False, 'import os\n'), ((4997, 5034), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name1'], {}), '(save_dir, gripper_name1)\n', (5009, 5034), False, 'import os\n'), ((5240, 5277), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name2'], {}), '(save_dir, gripper_name2)\n', (5252, 5277), False, 'import os\n'), ((422, 531), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 1, 0)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 1, 0))\n', (438, 531), True, 'from mayavi import mlab as mayalab\n'), ((1198, 1339), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'color': '(0, 1, 0)', 'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], color=(0, 1, 0), mode='arrow', scale_factor=1.0)\n", (1214, 1339), True, 'from mayavi import mlab as mayalab\n'), ((3594, 3630), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (3606, 3630), False, 'import os\n'), ((545, 654), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 1, 1)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 1, 1))\n', (561, 654), True, 'from mayavi import mlab as mayalab\n'), ((678, 783), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': 'color'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=color)\n', (694, 783), True, 'from mayavi import mlab as mayalab\n')]
|
# =========================================================================================
# Copyright 2016 Community Information Online Consortium (CIOC) and KCL Software Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================================
from __future__ import absolute_import
from hashlib import pbkdf2_hmac
from base64 import standard_b64encode
from pyramid.httpexceptions import HTTPFound
from pyramid.security import remember, forget
from formencode import Schema
from sqlalchemy import func
from offlinetools import models
from offlinetools.views.base import ViewBase
from offlinetools.views import validators
from offlinetools.syslanguage import _culture_list, default_culture
DEFAULT_REPEAT = 100000


class LoginSchema(Schema):
    allow_extra_fields = True
    filter_extra_fields = True

    LoginName = validators.UnicodeString(max=50, not_empty=True)
    LoginPwd = validators.String(not_empty=True)

    came_from = validators.UnicodeString()


class Login(ViewBase):
    def post(self):
        request = self.request
        _ = request.translate

        model_state = request.model_state
        model_state.schema = LoginSchema()

        if not model_state.validate():
            return self._get_edit_info()

        LoginName = model_state.value('LoginName')
        user = request.dbsession.query(models.Users).filter_by(UserName=LoginName).first()
        if not user:
            model_state.add_error_for('*', _('Invalid User Name or Password'))
            return self._get_edit_info()

        hash = Crypt(user.PasswordHashSalt, model_state.value('LoginPwd'), user.PasswordHashRepeat)
        if hash != user.PasswordHash:
            model_state.add_error_for('*', _('Invalid User Name or Password'))
            return self._get_edit_info()

        headers = remember(request, user.UserName)

        start_ln = [x.Culture for x in _culture_list if x.LangID == user.LangID and x.Active]
        if not start_ln:
            start_ln = [default_culture()]

        return HTTPFound(location=model_state.value('came_from', request.route_url('search', ln=start_ln[0])),
                         headers=headers)

    def get(self):
        request = self.request

        login_url = request.route_url('login')
        referrer = request.url
        if referrer == login_url:
            referrer = request.route_url('search')  # never use the login form itself as came_from

        came_from = request.params.get('came_from', referrer)
        request.model_state.data['came_from'] = came_from

        return self._get_edit_info()

    def _get_edit_info(self):
        request = self.request
        session = request.dbsession

        user_count = session.query(func.count(models.Users.UserName), func.count(models.Record.NUM)).one()
        has_data = any(user_count)

        failed_updates = False
        has_updated = True
        if not has_data:
            config = request.config
            failed_updates = not not config.update_failure_count
            has_updated = not not config.last_update

        return {'has_data': has_data, 'failed_updates': failed_updates, 'has_updated': has_updated}


def logout(request):
    headers = forget(request)
    return HTTPFound(location=request.route_url('login'),
                     headers=headers)


def Crypt(salt, password, repeat=DEFAULT_REPEAT):
    return standard_b64encode(pbkdf2_hmac('sha1', password.encode('utf-8'), salt.encode('utf-8'), repeat, 33)).decode('utf-8').strip()
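
# --- Illustrative usage sketch (added comments; not part of the original CIOC module) ---
# A hypothetical verification call that mirrors what Login.post() does above:
#
#     candidate = Crypt(user.PasswordHashSalt, 'secret', user.PasswordHashRepeat)
#     valid = candidate == user.PasswordHash
#
# Crypt() stretches the supplied password with PBKDF2-HMAC-SHA1 (100000 rounds by
# default) over the per-user salt, producing a 33-byte key that is base64-encoded
# and compared against the stored PasswordHash string.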
|
[
"pyramid.security.remember",
"pyramid.security.forget",
"offlinetools.views.validators.UnicodeString",
"offlinetools.syslanguage.default_culture",
"offlinetools.views.validators.String",
"sqlalchemy.func.count"
] |
[((1412, 1460), 'offlinetools.views.validators.UnicodeString', 'validators.UnicodeString', ([], {'max': '(50)', 'not_empty': '(True)'}), '(max=50, not_empty=True)\n', (1436, 1460), False, 'from offlinetools.views import validators\n'), ((1476, 1509), 'offlinetools.views.validators.String', 'validators.String', ([], {'not_empty': '(True)'}), '(not_empty=True)\n', (1493, 1509), False, 'from offlinetools.views import validators\n'), ((1527, 1553), 'offlinetools.views.validators.UnicodeString', 'validators.UnicodeString', ([], {}), '()\n', (1551, 1553), False, 'from offlinetools.views import validators\n'), ((3776, 3791), 'pyramid.security.forget', 'forget', (['request'], {}), '(request)\n', (3782, 3791), False, 'from pyramid.security import remember, forget\n'), ((2389, 2421), 'pyramid.security.remember', 'remember', (['request', 'user.UserName'], {}), '(request, user.UserName)\n', (2397, 2421), False, 'from pyramid.security import remember, forget\n'), ((2565, 2582), 'offlinetools.syslanguage.default_culture', 'default_culture', ([], {}), '()\n', (2580, 2582), False, 'from offlinetools.syslanguage import _culture_list, default_culture\n'), ((3292, 3325), 'sqlalchemy.func.count', 'func.count', (['models.Users.UserName'], {}), '(models.Users.UserName)\n', (3302, 3325), False, 'from sqlalchemy import func\n'), ((3327, 3356), 'sqlalchemy.func.count', 'func.count', (['models.Record.NUM'], {}), '(models.Record.NUM)\n', (3337, 3356), False, 'from sqlalchemy import func\n')]
|